# Source: xakepru/x14.11-coding-hyperv
# Path: src/hyperv4/distorm/examples/tests/test_distorm3.py
#
# Gil Dabah 2006, http://ragestorm.net/distorm
# Tests for diStorm3
#
import os
import distorm3
from distorm3 import *
import struct
import unittest
import random
REG_NONE = 255
_REGISTERS = ["RAX", "RCX", "RDX", "RBX", "RSP", "RBP", "RSI", "RDI", "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
"EAX", "ECX", "EDX", "EBX", "ESP", "EBP", "ESI", "EDI", "R8D", "R9D", "R10D", "R11D", "R12D", "R13D", "R14D", "R15D",
"AX", "CX", "DX", "BX", "SP", "BP", "SI", "DI", "R8W", "R9W", "R10W", "R11W", "R12W", "R13W", "R14W", "R15W",
"AL", "CL", "DL", "BL", "AH", "CH", "DH", "BH", "R8B", "R9B", "R10B", "R11B", "R12B", "R13B", "R14B", "R15B",
"SPL", "BPL", "SIL", "DIL",
"ES", "CS", "SS", "DS", "FS", "GS",
"RIP",
"ST0", "ST1", "ST2", "ST3", "ST4", "ST5", "ST6", "ST7",
"MM0", "MM1", "MM2", "MM3", "MM4", "MM5", "MM6", "MM7",
"XMM0", "XMM1", "XMM2", "XMM3", "XMM4", "XMM5", "XMM6", "XMM7", "XMM8", "XMM9", "XMM10", "XMM11", "XMM12", "XMM13", "XMM14", "XMM15",
"YMM0", "YMM1", "YMM2", "YMM3", "YMM4", "YMM5", "YMM6", "YMM7", "YMM8", "YMM9", "YMM10", "YMM11", "YMM12", "YMM13", "YMM14", "YMM15",
"CR0", "", "CR2", "CR3", "CR4", "", "", "", "CR8",
"DR0", "DR1", "DR2", "DR3", "", "", "DR6", "DR7"]
class Registers(object):
def __init__(self):
        for i, name in enumerate(_REGISTERS):
            if name:
                setattr(self, name, i)
Regs = Registers()
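# Regs exposes each register name as an attribute holding its list index,
# assumed to match distorm3's internal register indices (e.g. Regs.RAX == 0,
# Regs.EAX == 16).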
fbin = []
def Assemble(text, mode):
lines = text.replace("\n", "\r\n")
if mode is None:
mode = 32
lines = ("bits %d\r\n" % mode) + lines
open("1.asm", "wb").write(lines)
if mode == 64:
mode = "amd64"
else:
mode = "x86"
os.system("c:\\yasm.exe -m%s 1.asm" % mode)
return open("1", "rb").read()
class InstBin(unittest.TestCase):
def __init__(self, bin, mode):
bin = bin.decode("hex")
#fbin[mode].write(bin)
self.insts = Decompose(0, bin, mode)
self.inst = self.insts[0]
def check_valid(self, instsNo = 1):
self.assertNotEqual(self.inst.rawFlags, 65535)
self.assertEqual(len(self.insts), instsNo)
def check_invalid(self):
self.assertEqual(self.inst.rawFlags, 65535)
def check_mnemonic(self, mnemonic, instNo = 0):
self.assertNotEqual(self.inst.rawFlags, 65535)
self.assertEqual(self.insts[instNo].mnemonic, mnemonic)
class Inst(unittest.TestCase):
def __init__(self, instText, mode, instNo, features):
modeSize = [16, 32, 64][mode]
bin = Assemble(instText, modeSize)
#print map(lambda x: hex(ord(x)), bin)
#fbin[mode].write(bin)
self.insts = Decompose(0, bin, mode)
self.inst = self.insts[instNo]
def check_mnemonic(self, mnemonic):
self.assertEqual(self.inst.mnemonic, mnemonic)
def check_imm(self, n, val, sz):
self.assertEqual(self.inst.operands[n].type, distorm3.OPERAND_IMMEDIATE)
self.assertEqual(self.inst.operands[n].size, sz)
self.assertEqual(self.inst.operands[n].value, val)
def check_reg(self, n, idx, sz):
self.assertEqual(self.inst.operands[n].type, distorm3.OPERAND_REGISTER)
self.assertEqual(self.inst.operands[n].size, sz)
self.assertEqual(self.inst.operands[n].index, idx)
def check_pc(self, val, sz):
self.assertEqual(self.inst.operands[0].type, distorm3.OPERAND_IMMEDIATE)
self.assertEqual(self.inst.operands[0].size, sz)
self.assertEqual(self.inst.operands[0].value, val)
def check_disp(self, n, val, dispSize, derefSize):
self.assertEqual(self.inst.operands[n].type, distorm3.OPERAND_MEMORY)
self.assertEqual(self.inst.operands[n].dispSize, dispSize)
self.assertEqual(self.inst.operands[n].size, derefSize)
self.assertEqual(self.inst.operands[n].disp, val)
def check_abs_disp(self, n, val, dispSize, derefSize):
self.assertEqual(self.inst.operands[n].type, distorm3.OPERAND_ABSOLUTE_ADDRESS)
self.assertEqual(self.inst.operands[n].dispSize, dispSize)
self.assertEqual(self.inst.operands[n].size, derefSize)
self.assertEqual(self.inst.operands[n].disp, val)
def check_simple_deref(self, n, idx, derefSize):
""" Checks whether a (simple) memory dereference type is used, size of deref is in ops.size.
Displacement is ignored in this check. """
self.assertEqual(self.inst.operands[n].type, distorm3.OPERAND_MEMORY)
self.assertEqual(self.inst.operands[n].size, derefSize)
self.assertEqual(self.inst.operands[n].index, idx)
def check_deref(self, n, idx, base, derefSize):
""" Checks whether a memory dereference type is used, size of deref is in ops.size.
Base registers is in inst.base.
Displacement is ignored in this check. """
self.assertEqual(self.inst.operands[n].type, distorm3.OPERAND_MEMORY)
self.assertEqual(self.inst.operands[n].size, derefSize)
self.assertEqual(self.inst.operands[n].index, idx)
self.assertEqual(self.inst.operands[n].base, base)
def check_type_size(self, n, t, sz):
self.assertEqual(self.inst.operands[n].type, t)
self.assertEqual(self.inst.operands[n].size, sz)
def check_addr_size(self, sz):
self.assertEqual({0: 16, 1: 32, 2: 64}[(self.inst.rawFlags >> 10) & 3], sz)
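# Note: Inst.check_addr_size assumes distorm3 packs the effective address size
# into rawFlags bits 10-11 (0 -> 16, 1 -> 32, 2 -> 64).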
def I16(instText, instNo = 0, features = 0):
return Inst(instText, Decode16Bits, instNo, features)
def I32(instText, features = 0):
return Inst(instText, Decode32Bits, 0, features)
def IB32(bin):
return InstBin(bin, Decode32Bits)
def I64(instText, features = 0):
return Inst(instText, Decode64Bits, 0, features)
def IB64(bin):
return InstBin(bin, Decode64Bits)
def ABS64(x):
return x
#return struct.unpack("q", struct.pack("Q", x))[0]
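
# Illustrative only -- a hypothetical snippet (not called by the suite) showing
# how the wrappers above combine assembling and decoding:
def _example_usage():
    a = I32("inc eax") # assemble with yasm, then Decompose() the bytes
    a.check_mnemonic("INC")
    b = IB32("40") # decode raw hex directly: 0x40 is INC EAX in 32-bit mode
    b.check_mnemonic("INC")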
class TestMode16(unittest.TestCase):
Derefs = ["BX + SI", "BX + DI", "BP + SI", "BP + DI", "SI", "DI", "BP", "BX"]
DerefsInfo = [(Regs.BX, Regs.SI), (Regs.BX, Regs.DI), (Regs.BP, Regs.SI), (Regs.BP, Regs.DI),
(Regs.SI,), (Regs.DI,), (Regs.BP,), (Regs.BX,)]
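    # These are the eight canonical 16-bit ModR/M memory forms, listed in
    # encoding order (rm = 0..7).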
def test_none(self):
self.failIf(len(I16("cbw").inst.operands) > 0)
def test_imm8(self):
I16("int 0x55").check_imm(0, 0x55, 8)
def test_imm16(self):
I16("ret 0x1122").check_imm(0, 0x1122, 16)
def test_imm_full(self):
I16("push 0x1234").check_imm(0, 0x1234, 16)
def test_imm_aadm(self):
I16("aam").check_imm(0, 0xa, 8)
I16("aam 0x15").check_imm(0, 0x15, 8)
I16("aad").check_imm(0, 0xa, 8)
I16("aad 0x51").check_imm(0, 0x51, 8)
def test_seimm(self):
I16("push 5").check_imm(0, 0x5, 8)
a = I16("push -6")
self.assertEqual(a.inst.size, 2)
a.check_type_size(0, distorm3.OPERAND_IMMEDIATE, 8)
self.failIf(ABS64(a.inst.operands[0].value) != -6)
a = I16("db 0x66\n push -5")
self.assertEqual(a.inst.size, 3)
a.check_type_size(0, distorm3.OPERAND_IMMEDIATE, 32)
self.failIf(ABS64(a.inst.operands[0].value) != -5)
def test_imm16_1_imm8_2(self):
a = I16("enter 0x1234, 0x40")
a.check_imm(0, 0x1234, 16)
a.check_imm(1, 0x40, 8)
def test_imm8_1_imm8_2(self):
a = I16("extrq xmm0, 0x55, 0xff")
a.check_imm(1, 0x55, 8)
a.check_imm(2, 0xff, 8)
def test_reg8(self):
I16("inc dh").check_reg(0, Regs.DH, 8)
def test_reg16(self):
I16("arpl ax, bp").check_reg(1, Regs.BP, 16)
def test_reg_full(self):
I16("dec di").check_reg(0, Regs.DI, 16)
def test_reg32(self):
I16("movmskps ebx, xmm6").check_reg(0, Regs.EBX, 32)
def test_reg32_64(self):
I16("cvttsd2si esp, xmm3").check_reg(0, Regs.ESP, 32)
def test_freg32_64_rm(self):
I16("mov cr0, eax").check_reg(1, Regs.EAX, 32)
def test_rm8(self):
I16("seto dh").check_reg(0, Regs.DH, 8)
def test_rm16(self):
I16("str di").check_reg(0, Regs.DI, 16)
def test_rm_full(self):
I16("push bp").check_reg(0, Regs.BP, 16)
def test_rm32_64(self):
I16("movd xmm0, ebx").check_reg(1, Regs.EBX, 32)
def test_fpum16(self):
I16("fiadd word [bx]").check_simple_deref(0, Regs.BX, 16)
def test_fpum32(self):
I16("fisttp dword [si]").check_simple_deref(0, Regs.SI, 32)
def test_fpum64(self):
I16("fadd qword [esp]").check_simple_deref(0, Regs.ESP, 64)
def test_fpum80(self):
I16("fbld [eax]").check_simple_deref(0, Regs.EAX, 80)
def test_r32_m8(self):
I16("pinsrb xmm4, eax, 0x55").check_reg(1, Regs.EAX, 32)
I16("pinsrb xmm4, [bx], 0x55").check_simple_deref(1, Regs.BX, 8)
def test_r32_m16(self):
I16("pinsrw xmm4, edi, 0x55").check_reg(1, Regs.EDI, 32)
I16("pinsrw xmm1, word [si], 0x55").check_simple_deref(1, Regs.SI, 16)
def test_r32_64_m8(self):
I16("pextrb eax, xmm4, 0xaa").check_reg(0, Regs.EAX, 32)
I16("pextrb [bx], xmm2, 0xaa").check_simple_deref(0, Regs.BX, 8)
def test_r32_64_m16(self):
I16("pextrw esp, xmm7, 0x11").check_reg(0, Regs.ESP, 32)
I16("pextrw [bp], xmm0, 0xbb").check_simple_deref(0, Regs.BP, 16)
def test_rfull_m16(self):
I16("smsw ax").check_reg(0, Regs.AX, 16)
I16("smsw [bx]").check_simple_deref(0, Regs.BX, 16)
def test_creg(self):
I16("mov esp, cr3").check_reg(1, Regs.CR3, 32)
#I16("mov esp, cr8").check_reg(1, Regs.CR8, 32)
def test_dreg(self):
I16("mov edi, dr7").check_reg(1, Regs.DR7, 32)
def test_sreg(self):
I16("mov ax, ds").check_reg(1, Regs.DS, 16)
def test_seg(self):
I16("push fs").check_reg(0, Regs.FS, 16)
I16("db 0x66\n push es").check_reg(0, Regs.ES, 16)
def test_acc8(self):
I16("in al, 0x60").check_reg(0, Regs.AL, 8)
def test_acc_full(self):
I16("add ax, 0x100").check_reg(0, Regs.AX, 16)
def test_acc_full_not64(self):
I16("out 0x64, ax").check_reg(1, Regs.AX, 16)
def test_mem16_full(self):
I16("call far [bp]").check_simple_deref(0, Regs.BP, 16)
def test_ptr16_full(self):
a = I16("jmp 0xffff:0x1234").inst
self.assertEqual(a.size, 5)
self.assertEqual(a.operands[0].type, distorm3.OPERAND_FAR_MEMORY)
self.assertEqual(a.operands[0].size, 16)
self.assertEqual(a.operands[0].seg, 0xffff)
self.assertEqual(a.operands[0].off, 0x1234)
def test_mem16_3264(self):
I16("sgdt [bx]").check_simple_deref(0, Regs.BX, 32)
def test_relcb(self):
a = I16("db 0xe9\ndw 0x00")
a.check_pc(3, 16)
a = I16("db 0xe2\ndb 0x50")
a.check_pc(0x52, 8)
a = I16("db 0xe2\ndb 0xfd")
a.check_pc(-1, 8)
a = I16("db 0x67\ndb 0xe2\ndb 0xf0")
a.check_pc(-0xd, 8)
def test_relc_full(self):
a = I16("jmp 0x100")
self.assertEqual(a.inst.size, 3)
a.check_type_size(0, distorm3.OPERAND_IMMEDIATE, 16)
def test_mem(self):
I16("lea ax, [bx]").check_simple_deref(1, Regs.BX, 0)
def test_mem32(self):
I16("movntss [ebx], xmm5").check_simple_deref(0, Regs.EBX, 32)
def test_mem32_64(self):
I16("movnti [ebx], eax").check_simple_deref(0, Regs.EBX, 32)
def test_mem64(self):
I16("movlps [edi], xmm7").check_simple_deref(0, Regs.EDI, 64)
def test_mem128(self):
I16("movntps [eax], xmm3").check_simple_deref(0, Regs.EAX, 128)
def test_mem64_128(self):
I16("cmpxchg8b [edx]").check_simple_deref(0, Regs.EDX, 64)
def test_moffs8(self):
I16("mov al, [0x1234]").check_abs_disp(1, 0x1234, 16, 8)
I16("mov [dword 0x11112222], al").check_abs_disp(0, 0x11112222, 32, 8)
def test_moff_full(self):
I16("mov [0x8765], ax").check_abs_disp(0, 0x8765, 16, 16)
I16("mov ax, [dword 0x11112222]").check_abs_disp(1, 0x11112222, 32, 16)
def test_const1(self):
I16("shl si, 1").check_imm(1, 1, 8)
def test_regcl(self):
I16("rcl bp, cl").check_reg(1, Regs.CL, 8)
def test_ib_rb(self):
I16("mov dl, 0x88").check_reg(0, Regs.DL, 8)
def test_ib_r_dw_qw(self):
I16("bswap ecx").check_reg(0, Regs.ECX, 32)
def test_ib_r_full(self):
I16("inc si").check_reg(0, Regs.SI, 16)
def test_regi_esi(self):
I16("lodsb").check_simple_deref(1, Regs.SI, 8)
I16("cmpsw").check_simple_deref(0, Regs.SI, 16)
I16("lodsd").check_simple_deref(1, Regs.SI, 32)
def test_regi_edi(self):
I16("movsb").check_simple_deref(0, Regs.DI, 8)
I16("scasw").check_simple_deref(0, Regs.DI, 16)
I16("stosd").check_simple_deref(0, Regs.DI, 32)
def test_regi_ebxal(self):
a = I16("xlatb")
a.check_type_size(0, distorm3.OPERAND_MEMORY, 8)
self.failIf(a.inst.operands[0].index != Regs.AL)
self.failIf(a.inst.operands[0].base != Regs.BX)
def test_regi_eax(self):
I16("vmrun [ax]").check_simple_deref(0, Regs.AX, 16)
def test_regdx(self):
I16("in ax, dx").check_reg(1, Regs.DX, 16)
def test_regecx(self):
I16("invlpga [eax], ecx").check_reg(1, Regs.ECX, 32)
def test_fpu_si(self):
I16("fxch st4").check_reg(0, Regs.ST4, 32)
def test_fpu_ssi(self):
a = I16("fcmovnbe st0, st3")
a.check_reg(0, Regs.ST0, 32)
a.check_reg(1, Regs.ST3, 32)
def test_fpu_sis(self):
a = I16("fadd st3, st0")
a.check_reg(0, Regs.ST3, 32)
a.check_reg(1, Regs.ST0, 32)
def test_mm(self):
I16("pand mm0, mm7").check_reg(0, Regs.MM0, 64)
def test_mm_rm(self):
I16("psllw mm0, 0x55").check_reg(0, Regs.MM0, 64)
def test_mm32(self):
I16("punpcklbw mm1, [si]").check_simple_deref(1, Regs.SI, 32)
def test_mm64(self):
I16("packsswb mm3, [bx]").check_simple_deref(1, Regs.BX, 64)
def test_xmm(self):
I16("orps xmm5, xmm4").check_reg(0, Regs.XMM5, 128)
def test_xmm_rm(self):
I16("psrlw xmm6, 0x12").check_reg(0, Regs.XMM6, 128)
def test_xmm16(self):
I16("pmovsxbq xmm3, [bp]").check_simple_deref(1, Regs.BP, 16)
def test_xmm32(self):
I16("pmovsxwq xmm5, [di]").check_simple_deref(1, Regs.DI, 32)
def test_xmm64(self):
I16("roundsd xmm6, [si], 0x55").check_simple_deref(1, Regs.SI, 64)
def test_xmm128(self):
I16("roundpd xmm7, [bx], 0xaa").check_simple_deref(1, Regs.BX, 128)
def test_regxmm0(self):
I16("blendvpd xmm1, xmm3, xmm0").check_reg(2, Regs.XMM0, 128)
def test_disp_only(self):
a = I16("add [0x1234], bx")
a.check_type_size(0, distorm3.OPERAND_ABSOLUTE_ADDRESS, 16)
self.failIf(a.inst.operands[0].dispSize != 16)
self.failIf(a.inst.operands[0].disp != 0x1234)
def test_modrm(self):
texts = ["ADD [%s], AX" % i for i in self.Derefs]
for i in enumerate(texts):
a = I16(i[1])
if len(self.DerefsInfo[i[0]]) == 2:
a.check_deref(0, self.DerefsInfo[i[0]][1], self.DerefsInfo[i[0]][0], 16)
else:
a.check_simple_deref(0, self.DerefsInfo[i[0]][0], 16)
def test_modrm_disp8(self):
texts = ["ADD [%s + 0x55], AX" % i for i in self.Derefs]
for i in enumerate(texts):
a = I16(i[1])
if len(self.DerefsInfo[i[0]]) == 2:
a.check_deref(0, self.DerefsInfo[i[0]][1], self.DerefsInfo[i[0]][0], 16)
else:
a.check_simple_deref(0, self.DerefsInfo[i[0]][0], 16)
self.failIf(a.inst.operands[0].dispSize != 8)
self.failIf(a.inst.operands[0].disp != 0x55)
def test_modrm_disp16(self):
texts = ["ADD [%s + 0x3322], AX" % i for i in self.Derefs]
for i in enumerate(texts):
a = I16(i[1])
if len(self.DerefsInfo[i[0]]) == 2:
a.check_deref(0, self.DerefsInfo[i[0]][1], self.DerefsInfo[i[0]][0], 16)
else:
a.check_simple_deref(0, self.DerefsInfo[i[0]][0], 16)
self.failIf(a.inst.operands[0].dispSize != 16)
self.failIf(a.inst.operands[0].disp != 0x3322)
class TestMode32(unittest.TestCase):
Derefs = ["EAX", "ECX", "EDX", "EBX", "EBP", "ESI", "EDI"]
DerefsInfo = [Regs.EAX, Regs.ECX, Regs.EDX, Regs.EBX, Regs.EBP, Regs.ESI, Regs.EDI]
def test_none(self):
self.failIf(len(I32("cdq").inst.operands) > 0)
def test_imm8(self):
I32("int 0x55").check_imm(0, 0x55, 8)
def test_imm16(self):
I32("ret 0x1122").check_imm(0, 0x1122, 16)
def test_imm_full(self):
I32("push 0x12345678").check_imm(0, 0x12345678, 32)
def test_imm_aadm(self):
I32("aam").check_imm(0, 0xa, 8)
I32("aam 0x15").check_imm(0, 0x15, 8)
I32("aad").check_imm(0, 0xa, 8)
I32("aad 0x51").check_imm(0, 0x51, 8)
def test_seimm(self):
I32("push 6").check_imm(0, 0x6, 8)
a = I32("push -7")
self.assertEqual(a.inst.size, 2)
a.check_type_size(0, distorm3.OPERAND_IMMEDIATE, 8)
self.failIf(ABS64(a.inst.operands[0].value) != -7)
a = I32("db 0x66\n push -5")
self.assertEqual(a.inst.size, 3)
a.check_type_size(0, distorm3.OPERAND_IMMEDIATE, 16)
self.failIf(ABS64(a.inst.operands[0].value) != -5)
def test_imm16_1_imm8_2(self):
a = I32("enter 0x1234, 0x40")
a.check_imm(0, 0x1234, 16)
a.check_imm(1, 0x40, 8)
def test_imm8_1_imm8_2(self):
a = I32("extrq xmm0, 0x55, 0xff")
a.check_imm(1, 0x55, 8)
a.check_imm(2, 0xff, 8)
def test_reg8(self):
I32("inc dh").check_reg(0, Regs.DH, 8)
def test_reg16(self):
I32("arpl ax, bp").check_reg(1, Regs.BP, 16)
def test_reg_full(self):
I32("dec edi").check_reg(0, Regs.EDI, 32)
def test_reg32(self):
I32("movmskps ebx, xmm6").check_reg(0, Regs.EBX, 32)
def test_reg32_64(self):
I32("cvttsd2si esp, xmm3").check_reg(0, Regs.ESP, 32)
def test_freg32_64_rm(self):
I32("mov cr0, eax").check_reg(1, Regs.EAX, 32)
def test_rm8(self):
I32("seto dh").check_reg(0, Regs.DH, 8)
def test_rm16(self):
I32("verr di").check_reg(0, Regs.DI, 16)
def test_rm_full(self):
I32("push ebp").check_reg(0, Regs.EBP, 32)
def test_rm32_64(self):
I32("movd xmm0, ebx").check_reg(1, Regs.EBX, 32)
def test_fpum16(self):
I32("fiadd word [ebx]").check_simple_deref(0, Regs.EBX, 16)
def test_fpum32(self):
I32("fisttp dword [esi]").check_simple_deref(0, Regs.ESI, 32)
def test_fpum64(self):
I32("fadd qword [esp]").check_simple_deref(0, Regs.ESP, 64)
def test_fpum80(self):
I32("fbld [eax]").check_simple_deref(0, Regs.EAX, 80)
def test_r32_m8(self):
I32("pinsrb xmm4, eax, 0x55").check_reg(1, Regs.EAX, 32)
I32("pinsrb xmm4, [ebx], 0x55").check_simple_deref(1, Regs.EBX, 8)
def test_r32_m16(self):
I32("pinsrw xmm4, edi, 0x55").check_reg(1, Regs.EDI, 32)
I32("pinsrw xmm1, word [esi], 0x55").check_simple_deref(1, Regs.ESI, 16)
def test_r32_64_m8(self):
I32("pextrb eax, xmm4, 0xaa").check_reg(0, Regs.EAX, 32)
I32("pextrb [ebx], xmm2, 0xaa").check_simple_deref(0, Regs.EBX, 8)
def test_r32_64_m16(self):
I32("pextrw esp, xmm7, 0x11").check_reg(0, Regs.ESP, 32)
I32("pextrw [ebp], xmm0, 0xbb").check_simple_deref(0, Regs.EBP, 16)
def test_rfull_m16(self):
I32("smsw eax").check_reg(0, Regs.EAX, 32)
I32("smsw [ebx]").check_simple_deref(0, Regs.EBX, 16)
def test_creg(self):
I32("mov esp, cr3").check_reg(1, Regs.CR3, 32)
def test_dreg(self):
I32("mov edi, dr7").check_reg(1, Regs.DR7, 32)
def test_sreg(self):
I32("mov ax, ds").check_reg(1, Regs.DS, 16)
def test_seg(self):
I32("push ss").check_reg(0, Regs.SS, 16)
I32("db 0x66\n push ds").check_reg(0, Regs.DS, 16)
def test_acc8(self):
I32("in al, 0x60").check_reg(0, Regs.AL, 8)
def test_acc_full(self):
I32("add eax, 0x100").check_reg(0, Regs.EAX, 32)
def test_acc_full_not64(self):
I32("out 0x64, eax").check_reg(1, Regs.EAX, 32)
def test_mem16_full(self):
I32("call far [ebp]").check_simple_deref(0, Regs.EBP, 32)
def test_ptr16_full(self):
a = I32("jmp 0xffff:0x12345678").inst
self.assertEqual(a.size, 7)
self.assertEqual(a.operands[0].type, distorm3.OPERAND_FAR_MEMORY)
self.assertEqual(a.operands[0].size, 32)
self.assertEqual(a.operands[0].seg, 0xffff)
self.assertEqual(a.operands[0].off, 0x12345678)
def test_mem16_3264(self):
I32("sgdt [ebx]").check_simple_deref(0, Regs.EBX, 32)
def test_relcb(self):
a = I32("db 0xe9\ndd 0x00")
a.check_pc(5, 32)
a = I32("db 0xe2\ndb 0x50")
a.check_pc(0x52, 8)
a = I32("db 0xe2\ndb 0xfd")
a.check_pc(-1, 8)
a = I32("db 0x67\ndb 0xe2\ndb 0xf0")
a.check_pc(-0xd, 8)
def test_relc_full(self):
a = I32("jmp 0x100")
self.assertEqual(a.inst.size, 5)
a.check_type_size(0, distorm3.OPERAND_IMMEDIATE, 32)
def test_mem(self):
I32("lea ax, [ebx]").check_simple_deref(1, Regs.EBX, 0)
def test_mem32(self):
I32("movntss [ebx], xmm5").check_simple_deref(0, Regs.EBX, 32)
def test_mem32_64(self):
I32("movnti [edi], eax").check_simple_deref(0, Regs.EDI, 32)
def test_mem64(self):
I32("movlps [edi], xmm7").check_simple_deref(0, Regs.EDI, 64)
def test_mem128(self):
I32("movntps [eax], xmm3").check_simple_deref(0, Regs.EAX, 128)
def test_mem64_128(self):
I32("cmpxchg8b [edx]").check_simple_deref(0, Regs.EDX, 64)
def test_moffs8(self):
I32("mov al, [word 0x5678]").check_abs_disp(1, 0x5678, 16, 8)
I32("mov [0x11112222], al").check_abs_disp(0, 0x11112222, 32, 8)
def test_moff_full(self):
I32("mov [word 0x4321], eax").check_abs_disp(0, 0x4321, 16, 32)
I32("mov eax, [0x11112222]").check_abs_disp(1, 0x11112222, 32, 32)
def test_const1(self):
I32("shl esi, 1").check_imm(1, 1, 8)
def test_regcl(self):
I32("rcl ebp, cl").check_reg(1, Regs.CL, 8)
def test_ib_rb(self):
I32("mov dl, 0x88").check_reg(0, Regs.DL, 8)
def test_ib_r_dw_qw(self):
I32("bswap ecx").check_reg(0, Regs.ECX, 32)
def test_ib_r_full(self):
I32("inc esi").check_reg(0, Regs.ESI, 32)
def test_regi_esi(self):
I32("lodsb").check_simple_deref(1, Regs.ESI, 8)
I32("cmpsw").check_simple_deref(0, Regs.ESI, 16)
I32("lodsd").check_simple_deref(1, Regs.ESI, 32)
def test_regi_edi(self):
I32("movsb").check_simple_deref(0, Regs.EDI, 8)
I32("scasw").check_simple_deref(0, Regs.EDI, 16)
I32("stosd").check_simple_deref(0, Regs.EDI, 32)
def test_regi_ebxal(self):
a = I32("xlatb")
a.check_type_size(0, distorm3.OPERAND_MEMORY, 8)
self.failIf(a.inst.operands[0].index != Regs.AL)
self.failIf(a.inst.operands[0].base != Regs.EBX)
def test_regi_eax(self):
I32("vmrun [eax]").check_simple_deref(0, Regs.EAX, 32)
def test_regdx(self):
I32("in eax, dx").check_reg(1, Regs.DX, 16)
def test_regecx(self):
I32("invlpga [eax], ecx").check_reg(1, Regs.ECX, 32)
def test_fpu_si(self):
I32("fxch st4").check_reg(0, Regs.ST4, 32)
def test_fpu_ssi(self):
a = I32("fcmovnbe st0, st3")
a.check_reg(0, Regs.ST0, 32)
a.check_reg(1, Regs.ST3, 32)
def test_fpu_sis(self):
a = I32("fadd st3, st0")
a.check_reg(0, Regs.ST3, 32)
a.check_reg(1, Regs.ST0, 32)
def test_mm(self):
I32("pand mm0, mm7").check_reg(0, Regs.MM0, 64)
def test_mm_rm(self):
I32("psllw mm0, 0x55").check_reg(0, Regs.MM0, 64)
def test_mm32(self):
I32("punpcklbw mm1, [esi]").check_simple_deref(1, Regs.ESI, 32)
def test_mm64(self):
I32("packsswb mm3, [ebx]").check_simple_deref(1, Regs.EBX, 64)
def test_xmm(self):
I32("orps xmm5, xmm4").check_reg(0, Regs.XMM5, 128)
def test_xmm_rm(self):
I32("psrlw xmm6, 0x12").check_reg(0, Regs.XMM6, 128)
def test_xmm16(self):
I32("pmovsxbq xmm3, [ebp]").check_simple_deref(1, Regs.EBP, 16)
def test_xmm32(self):
I32("pmovsxwq xmm5, [edi]").check_simple_deref(1, Regs.EDI, 32)
def test_xmm64(self):
I32("roundsd xmm6, [esi], 0x55").check_simple_deref(1, Regs.ESI, 64)
def test_xmm128(self):
I32("roundpd xmm7, [ebx], 0xaa").check_simple_deref(1, Regs.EBX, 128)
def test_regxmm0(self):
I32("blendvpd xmm1, xmm3, xmm0").check_reg(2, Regs.XMM0, 128)
def test_cr8(self):
I32("db 0xf0\n mov cr0, eax").check_reg(0, Regs.CR8, 32)
def test_disp_only(self):
a = I32("add [0x12345678], ebx")
a.check_type_size(0, distorm3.OPERAND_ABSOLUTE_ADDRESS, 32)
self.failIf(a.inst.operands[0].dispSize != 32)
self.failIf(a.inst.operands[0].disp != 0x12345678)
def test_modrm(self):
texts = ["ADD [%s], EDI" % i for i in self.Derefs]
for i in enumerate(texts):
a = I32(i[1])
a.check_simple_deref(0, self.DerefsInfo[i[0]], 32)
def test_modrm_disp8(self):
texts = ["ADD [%s + 0x55], ESI" % i for i in self.Derefs]
for i in enumerate(texts):
a = I32(i[1])
a.check_simple_deref(0, self.DerefsInfo[i[0]], 32)
self.failIf(a.inst.operands[0].dispSize != 8)
self.failIf(a.inst.operands[0].disp != 0x55)
def test_modrm_disp32(self):
texts = ["ADD [%s + 0x33221144], EDX" % i for i in self.Derefs]
for i in enumerate(texts):
a = I32(i[1])
a.check_simple_deref(0, self.DerefsInfo[i[0]], 32)
self.failIf(a.inst.operands[0].dispSize != 32)
self.failIf(a.inst.operands[0].disp != 0x33221144)
def test_base_ebp(self):
a = I32("mov [ebp+0x55], eax")
a.check_simple_deref(0, Regs.EBP, 32)
self.failIf(a.inst.operands[0].dispSize != 8)
self.failIf(a.inst.operands[0].disp != 0x55)
a = I32("mov [ebp+0x55+eax], eax")
a.check_deref(0, Regs.EAX, Regs.EBP, 32)
self.failIf(a.inst.operands[0].dispSize != 8)
self.failIf(a.inst.operands[0].disp != 0x55)
a = I32("mov [ebp+0x55443322], eax")
a.check_simple_deref(0, Regs.EBP, 32)
self.failIf(a.inst.operands[0].dispSize != 32)
self.failIf(a.inst.operands[0].disp != 0x55443322)
Bases = ["EAX", "ECX", "EDX", "EBX", "ESP", "ESI", "EDI"]
BasesInfo = [Regs.EAX, Regs.ECX, Regs.EDX, Regs.EBX, Regs.ESP, Regs.ESI, Regs.EDI]
Indices = ["EAX", "ECX", "EDX", "EBX", "EBP", "ESI", "EDI"]
IndicesInfo = [Regs.EAX, Regs.ECX, Regs.EDX, Regs.EBX, Regs.EBP, Regs.ESI, Regs.EDI]
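    # ESP is absent from Indices because the SIB index encoding 100 means
    # "no index"; EBP is absent from Bases because a bare EBP base requires
    # a displacement.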
def test_bases(self):
for i in enumerate(self.Bases):
a = I32("cmp ebp, [%s]" % (i[1]))
a.check_simple_deref(1, self.BasesInfo[i[0]], 32)
def test_bases_disp32(self):
for i in enumerate(self.Bases):
a = I32("cmp ebp, [%s+0x12345678]" % (i[1]))
a.check_simple_deref(1, self.BasesInfo[i[0]], 32)
self.failIf(a.inst.operands[1].dispSize != 32)
self.failIf(a.inst.operands[1].disp != 0x12345678)
def test_scales(self):
for i in enumerate(self.Indices):
            # A scale of 2 is skipped: the assembler rewrites reg*2 as reg+reg, omitting the scale.
for s in [4, 8]:
a = I32("and bp, [%s*%d]" % (i[1], s))
a.check_deref(1, self.IndicesInfo[i[0]], None, 16)
self.failIf(a.inst.operands[1].scale != s)
def test_sib(self):
for i in enumerate(self.Indices):
for j in enumerate(self.Bases):
for s in [1, 2, 4, 8]:
a = I32("or bp, [%s*%d + %s]" % (i[1], s, j[1]))
a.check_deref(1, self.IndicesInfo[i[0]], self.BasesInfo[j[0]], 16)
if s != 1:
self.failIf(a.inst.operands[1].scale != s)
def test_sib_disp8(self):
for i in enumerate(self.Indices):
for j in enumerate(self.Bases):
for s in [1, 2, 4, 8]:
a = I32("xor al, [%s*%d + %s + 0x55]" % (i[1], s, j[1]))
a.check_deref(1, self.IndicesInfo[i[0]], self.BasesInfo[j[0]], 8)
self.failIf(a.inst.operands[1].dispSize != 8)
self.failIf(a.inst.operands[1].disp != 0x55)
if s != 1:
self.failIf(a.inst.operands[1].scale != s)
def test_sib_disp32(self):
for i in enumerate(self.Indices):
for j in enumerate(self.Bases):
for s in [1, 2, 4, 8]:
a = I32("sub ebp, [%s*%d + %s + 0x55aabbcc]" % (i[1], s, j[1]))
a.check_deref(1, self.IndicesInfo[i[0]], self.BasesInfo[j[0]], 32)
self.failIf(a.inst.operands[1].dispSize != 32)
self.failIf(a.inst.operands[1].disp != 0x55aabbcc)
if s != 1:
self.failIf(a.inst.operands[1].scale != s)
class TestMode64(unittest.TestCase):
Derefs = ["RAX", "RCX", "RDX", "RBX", "RBP", "RSI", "RDI"]
DerefsInfo = [Regs.RAX, Regs.RCX, Regs.RDX, Regs.RBX, Regs.RBP, Regs.RSI, Regs.RDI]
def test_none(self):
self.failIf(len(I64("cdq").inst.operands) > 0)
def test_imm8(self):
I64("int 0x55").check_imm(0, 0x55, 8)
def test_imm16(self):
I64("ret 0x1122").check_imm(0, 0x1122, 16)
def test_imm_full(self):
I64("push 0x12345678").check_imm(0, 0x12345678, 64)
def test_imm_aadm(self):
#I64("aam").check_imm(0, 0xa, 8)
#I64("aam 0x15").check_imm(0, 0x15, 8)
#I64("aad").check_imm(0, 0xa, 8)
#I64("aad 0x51").check_imm(0, 0x51, 8)
pass
def test_seimm(self):
I64("push 6").check_imm(0, 0x6, 8)
a = I64("push -7")
self.assertEqual(a.inst.size, 2)
a.check_type_size(0, distorm3.OPERAND_IMMEDIATE, 8)
self.failIf(ABS64(a.inst.operands[0].value) != -7)
def test_imm16_1_imm8_2(self):
a = I64("enter 0x1234, 0x40")
a.check_imm(0, 0x1234, 16)
a.check_imm(1, 0x40, 8)
def test_imm8_1_imm8_2(self):
a = I64("extrq xmm0, 0x55, 0xff")
a.check_imm(1, 0x55, 8)
a.check_imm(2, 0xff, 8)
def test_reg8(self):
I64("inc dh").check_reg(0, Regs.DH, 8)
def test_reg_full(self):
I64("dec rdi").check_reg(0, Regs.RDI, 64)
I64("cmp r15, r14").check_reg(0, Regs.R15, 64)
I64("cmp r8d, r9d").check_reg(0, Regs.R8D, 32)
I64("cmp r9w, r8w").check_reg(0, Regs.R9W, 16)
def test_reg32(self):
I64("movmskps ebx, xmm6").check_reg(0, Regs.EBX, 32)
I64("movmskps r11d, xmm6").check_reg(0, Regs.R11D, 32)
def test_reg32_64(self):
I64("cvttsd2si rsp, xmm3").check_reg(0, Regs.RSP, 64)
I64("cvttsd2si r14, xmm3").check_reg(0, Regs.R14, 64)
def test_freg32_64_rm(self):
I64("mov cr0, rax").check_reg(1, Regs.RAX, 64)
I64("mov cr0, r14").check_reg(1, Regs.R14, 64)
def test_rm8(self):
I64("seto dh").check_reg(0, Regs.DH, 8)
def test_rm16(self):
I64("verr di").check_reg(0, Regs.DI, 16)
I64("verr r8w").check_reg(0, Regs.R8W, 16)
def test_rm_full(self):
I64("push rbp").check_reg(0, Regs.RBP, 64)
def test_rm32_64(self):
I64("movq xmm0, rdx").check_reg(1, Regs.RDX, 64)
I64("movq xmm0, r10").check_reg(1, Regs.R10, 64)
I64("cvtsi2sd xmm0, rdx").check_reg(1, Regs.RDX, 64)
I64("vmread rax, rax").check_reg(1, Regs.RAX, 64)
def test_rm16_32(self):
I64("movsxd rax, eax").check_reg(1, Regs.EAX, 32)
I64("movzx rax, ax").check_reg(1, Regs.AX, 16)
def test_fpum16(self):
I64("fiadd word [rbx]").check_simple_deref(0, Regs.RBX, 16)
def test_fpum32(self):
I64("fisttp dword [rsi]").check_simple_deref(0, Regs.RSI, 32)
def test_fpum64(self):
I64("fadd qword [rsp]").check_simple_deref(0, Regs.RSP, 64)
def test_fpum80(self):
I64("fbld [rax]").check_simple_deref(0, Regs.RAX, 80)
def test_r32_m8(self):
I64("pinsrb xmm4, eax, 0x55").check_reg(1, Regs.EAX, 32)
I64("pinsrb xmm4, [rbx], 0x55").check_simple_deref(1, Regs.RBX, 8)
def test_r32_m16(self):
I64("pinsrw xmm4, edi, 0x55").check_reg(1, Regs.EDI, 32)
I64("pinsrw xmm1, word [rsi], 0x55").check_simple_deref(1, Regs.RSI, 16)
I64("pinsrw xmm1, r8d, 0x55").check_reg(1, Regs.R8D, 32)
def test_r32_64_m8(self):
I64("pextrb eax, xmm4, 0xaa").check_reg(0, Regs.EAX, 32)
I64("pextrb [rbx], xmm2, 0xaa").check_simple_deref(0, Regs.RBX, 8)
def test_r32_64_m16(self):
I64("pextrw esp, xmm7, 0x11").check_reg(0, Regs.ESP, 32)
I64("pextrw [rbp], xmm0, 0xbb").check_simple_deref(0, Regs.RBP, 16)
def test_rfull_m16(self):
I64("smsw eax").check_reg(0, Regs.EAX, 32)
I64("smsw [rbx]").check_simple_deref(0, Regs.RBX, 16)
def test_creg(self):
I64("mov rsp, cr3").check_reg(1, Regs.CR3, 64)
I64("mov cr8, rdx").check_reg(0, Regs.CR8, 64)
def test_dreg(self):
I64("mov rdi, dr7").check_reg(1, Regs.DR7, 64)
def test_sreg(self):
I64("mov ax, fs").check_reg(1, Regs.FS, 16)
def test_seg(self):
I64("push gs").check_reg(0, Regs.GS, 16)
def test_acc8(self):
I64("in al, 0x60").check_reg(0, Regs.AL, 8)
def test_acc_full(self):
I64("add rax, 0x100").check_reg(0, Regs.RAX, 64)
def test_acc_full_not64(self):
I64("out 0x64, eax").check_reg(1, Regs.EAX, 32)
def test_mem16_full(self):
I64("call far [rbp]").check_simple_deref(0, Regs.RBP, 32)
I64("db 0x48\n call far [rbp]").check_simple_deref(0, Regs.RBP, 64)
def test_mem16_3264(self):
I64("sgdt [rbx]").check_simple_deref(0, Regs.RBX, 64)
def test_relcb(self):
a = I64("db 0xe9\ndd 0x00")
a.check_pc(5, 32)
a = I64("db 0xe2\ndb 0x50")
a.check_pc(0x52, 8)
a = I64("db 0xe2\ndb 0xfd")
a.check_pc(-1, 8)
a = I64("db 0x67\ndb 0xe2\ndb 0xf0")
a.check_pc(-0xd, 8)
def test_relc_full(self):
a = I64("jmp 0x100")
self.assertEqual(a.inst.size, 5)
a.check_type_size(0, distorm3.OPERAND_IMMEDIATE, 32)
def test_mem(self):
I64("lea ax, [rbx]").check_simple_deref(1, Regs.RBX, 0)
def test_mem32(self):
I64("movntss [rbx], xmm5").check_simple_deref(0, Regs.RBX, 32)
def test_mem32_64(self):
I64("movnti [rdi], eax").check_simple_deref(0, Regs.RDI, 32)
I64("movnti [rbp], rax").check_simple_deref(0, Regs.RBP, 64)
def test_mem64(self):
I64("movlps [rdi], xmm7").check_simple_deref(0, Regs.RDI, 64)
def test_mem128(self):
I64("movntps [rax], xmm3").check_simple_deref(0, Regs.RAX, 128)
def test_mem64_128(self):
I64("cmpxchg8b [rdx]").check_simple_deref(0, Regs.RDX, 64)
I64("cmpxchg16b [rbx]").check_simple_deref(0, Regs.RBX, 128)
def test_moffs8(self):
I64("mov al, [dword 0x12345678]").check_abs_disp(1, 0x12345678, 32, 8)
I64("mov [qword 0xaaaabbbbccccdddd], al").check_abs_disp(0, 0xaaaabbbbccccdddd, 64, 8)
def test_moff_full(self):
I64("mov [dword 0xaaaabbbb], rax").check_abs_disp(0, 0xffffffffaaaabbbb, 32, 64)
I64("mov rax, [qword 0xaaaabbbbccccdddd]").check_abs_disp(1, 0xaaaabbbbccccdddd, 64, 64)
def test_const1(self):
I64("shl rsi, 1").check_imm(1, 1, 8)
def test_regcl(self):
I64("rcl rbp, cl").check_reg(1, Regs.CL, 8)
def test_ib_rb(self):
I64("mov dl, 0x88").check_reg(0, Regs.DL, 8)
I64("mov spl, 0x88").check_reg(0, Regs.SPL, 8)
I64("mov r10b, 0x88").check_reg(0, Regs.R10B, 8)
def test_ib_r_dw_qw(self):
I64("bswap rcx").check_reg(0, Regs.RCX, 64)
I64("bswap r10").check_reg(0, Regs.R10, 64)
I64("push r10").check_reg(0, Regs.R10, 64)
def test_ib_r_full(self):
I64("inc rsi").check_reg(0, Regs.RSI, 64)
I64("inc r9").check_reg(0, Regs.R9, 64)
I64("push r10w").check_reg(0, Regs.R10W, 16)
I64("xchg r10d, eax").check_reg(0, Regs.R10D, 32)
def test_regi_esi(self):
I64("lodsb").check_simple_deref(1, Regs.RSI, 8)
I64("cmpsw").check_simple_deref(0, Regs.RSI, 16)
I64("lodsd").check_simple_deref(1, Regs.RSI, 32)
I64("lodsq").check_simple_deref(1, Regs.RSI, 64)
def test_regi_edi(self):
I64("movsb").check_simple_deref(0, Regs.RDI, 8)
I64("scasw").check_simple_deref(0, Regs.RDI, 16)
I64("stosd").check_simple_deref(0, Regs.RDI, 32)
I64("stosq").check_simple_deref(0, Regs.RDI, 64)
def test_regi_ebxal(self):
a = I64("xlatb")
a.check_type_size(0, distorm3.OPERAND_MEMORY, 8)
self.failIf(a.inst.operands[0].index != Regs.AL)
self.failIf(a.inst.operands[0].base != Regs.RBX)
def test_regi_eax(self):
I64("vmrun [rax]").check_simple_deref(0, Regs.RAX, 64)
def test_regdx(self):
#I64("in eax, dx").check_reg(1, Regs.DX, 16)
pass
def test_regecx(self):
I64("invlpga [rax], ecx").check_reg(1, Regs.ECX, 32)
def test_fpu_si(self):
I64("fxch st4").check_reg(0, Regs.ST4, 32)
def test_fpu_ssi(self):
a = I64("fcmovnbe st0, st3")
a.check_reg(0, Regs.ST0, 32)
a.check_reg(1, Regs.ST3, 32)
def test_fpu_sis(self):
a = I64("fadd st3, st0")
a.check_reg(0, Regs.ST3, 32)
a.check_reg(1, Regs.ST0, 32)
def test_mm(self):
I64("pand mm0, mm7").check_reg(0, Regs.MM0, 64)
def test_mm_rm(self):
I64("psllw mm0, 0x55").check_reg(0, Regs.MM0, 64)
def test_mm32(self):
I64("punpcklbw mm1, [rsi]").check_simple_deref(1, Regs.RSI, 32)
def test_mm64(self):
I64("packsswb mm3, [rbx]").check_simple_deref(1, Regs.RBX, 64)
def test_xmm(self):
I64("orps xmm5, xmm4").check_reg(0, Regs.XMM5, 128)
I64("orps xmm15, xmm4").check_reg(0, Regs.XMM15, 128)
def test_xmm_rm(self):
I64("psrlw xmm6, 0x12").check_reg(0, Regs.XMM6, 128)
I64("psrlw xmm13, 0x12").check_reg(0, Regs.XMM13, 128)
def test_xmm16(self):
I64("pmovsxbq xmm3, [rbp]").check_simple_deref(1, Regs.RBP, 16)
def test_xmm32(self):
I64("pmovsxwq xmm5, [rdi]").check_simple_deref(1, Regs.RDI, 32)
def test_xmm64(self):
I64("roundsd xmm6, [rsi], 0x55").check_simple_deref(1, Regs.RSI, 64)
def test_xmm128(self):
I64("roundpd xmm7, [rbx], 0xaa").check_simple_deref(1, Regs.RBX, 128)
I64("roundpd xmm7, xmm15, 0xaa").check_reg(1, Regs.XMM15, 128)
def test_regxmm0(self):
I64("blendvpd xmm1, xmm3, xmm0").check_reg(2, Regs.XMM0, 128)
def test_disp_only(self):
a = I64("add [0x12345678], rbx")
a.check_type_size(0, distorm3.OPERAND_ABSOLUTE_ADDRESS, 64)
self.failIf(a.inst.operands[0].dispSize != 32)
self.failIf(a.inst.operands[0].disp != 0x12345678)
def test_modrm(self):
texts = ["ADD [%s], RDI" % i for i in self.Derefs]
for i in enumerate(texts):
a = I64(i[1])
a.check_simple_deref(0, self.DerefsInfo[i[0]], 64)
def test_modrm_disp8(self):
texts = ["ADD [%s + 0x55], RSI" % i for i in self.Derefs]
for i in enumerate(texts):
a = I64(i[1])
a.check_simple_deref(0, self.DerefsInfo[i[0]], 64)
self.failIf(a.inst.operands[0].dispSize != 8)
self.failIf(a.inst.operands[0].disp != 0x55)
def test_modrm_disp32(self):
texts = ["ADD [%s + 0x33221144], RDX" % i for i in self.Derefs]
for i in enumerate(texts):
a = I64(i[1])
a.check_simple_deref(0, self.DerefsInfo[i[0]], 64)
self.failIf(a.inst.operands[0].dispSize != 32)
self.failIf(a.inst.operands[0].disp != 0x33221144)
def test_base_rbp(self):
a = I64("mov [rbp+0x55], eax")
a.check_simple_deref(0, Regs.RBP, 32)
self.failIf(a.inst.operands[0].dispSize != 8)
self.failIf(a.inst.operands[0].disp != 0x55)
a = I64("mov [rbp+0x55443322], eax")
a.check_simple_deref(0, Regs.RBP, 32)
self.failIf(a.inst.operands[0].dispSize != 32)
self.failIf(a.inst.operands[0].disp != 0x55443322)
def test_base_rip(self):
a = I64("mov [rip+0x12345678], rdx")
a.check_simple_deref(0, Regs.RIP, 64)
self.failIf(a.inst.operands[0].dispSize != 32)
self.failIf(a.inst.operands[0].disp != 0x12345678)
def test_reg8_rex(self):
I64("mov sil, al").check_reg(0, Regs.SIL, 8)
I64("inc bpl").check_reg(0, Regs.BPL, 8)
def test_imm64(self):
I64("mov rax, 0x1234567890abcdef").check_imm(1, 0x1234567890abcdef, 64)
def test_reg64(self):
I64("movsxd r10, eax").check_reg(0, Regs.R10, 64)
    def test_rm16_32_bytes(self): # distinct name: a second test_rm16_32 would shadow the method defined above
        # MOVSXD RAX, [RAX]
I64("db 0x63\n db 0x00").check_simple_deref(1, Regs.RAX, 32)
#MOVZXDW RAX, [RAX]
#I64("db 0x66\n db 0x63\n db 0x00").check_simple_deref(1, Regs.RAX, 16)
        # MOVSXD RAX, EAX
I64("db 0x63\n db 0xc0").check_reg(1, Regs.EAX, 32)
#MOVZXDW RAX, AX
#I64("db 0x66\n db 0x63\n db 0xc0").check_reg(1, Regs.AX, 16)
#MOVZXDW RAX, R8W
#I64("db 0x66\n db 0x41\n db 0x63\n db 0xc0").check_reg(1, Regs.R8W, 16)
Bases = ["RAX", "RCX", "RDX", "RBX", "RSP", "RSI", "RDI", "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15"]
BasesInfo = [Regs.RAX, Regs.RCX, Regs.RDX, Regs.RBX, Regs.RSP, Regs.RSI, Regs.RDI, Regs.R8, Regs.R9, Regs.R10, Regs.R11, Regs.R12, Regs.R13, Regs.R14, Regs.R15]
Indices = ["RAX", "RCX", "RDX", "RBX", "RBP", "RSI", "RDI", "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15"]
IndicesInfo = [Regs.RAX, Regs.RCX, Regs.RDX, Regs.RBX, Regs.RBP, Regs.RSI, Regs.RDI, Regs.R8, Regs.R9, Regs.R10, Regs.R11, Regs.R12, Regs.R13, Regs.R14, Regs.R15]
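    # As in 32-bit mode, RSP cannot be encoded as a SIB index; a bare RBP base
    # requires a displacement, hence its absence from Bases.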
def test_bases(self):
for i in enumerate(self.Bases):
a = I64("cmp rbp, [%s]" % (i[1]))
a.check_simple_deref(1, self.BasesInfo[i[0]], 64)
def test_bases_disp32(self):
for i in enumerate(self.Bases):
a = I64("cmp rbp, [%s+0x12345678]" % (i[1]))
a.check_simple_deref(1, self.BasesInfo[i[0]], 64)
self.failIf(a.inst.operands[1].dispSize != 32)
self.failIf(a.inst.operands[1].disp != 0x12345678)
def test_scales(self):
for i in enumerate(self.Indices):
            # A scale of 2 is skipped: the assembler rewrites reg*2 as reg+reg, omitting the scale.
for s in [4, 8]:
a = I64("and rbp, [%s*%d]" % (i[1], s))
a.check_deref(1, self.IndicesInfo[i[0]], None, 64)
self.failIf(a.inst.operands[1].scale != s)
def test_sib(self):
for i in enumerate(self.Indices):
for j in enumerate(self.Bases):
for s in [1, 2, 4, 8]:
a = I64("or rbp, [%s*%d + %s]" % (i[1], s, j[1]))
a.check_deref(1, self.IndicesInfo[i[0]], self.BasesInfo[j[0]], 64)
if s != 1:
self.failIf(a.inst.operands[1].scale != s)
def test_sib_disp8(self):
for i in enumerate(self.Indices):
for j in enumerate(self.Bases):
for s in [1, 2, 4, 8]:
a = I64("xor al, [%s*%d + %s + 0x55]" % (i[1], s, j[1]))
a.check_deref(1, self.IndicesInfo[i[0]], self.BasesInfo[j[0]], 8)
self.failIf(a.inst.operands[1].dispSize != 8)
self.failIf(a.inst.operands[1].disp != 0x55)
if s != 1:
self.failIf(a.inst.operands[1].scale != s)
def test_sib_disp32(self):
for i in enumerate(self.Indices):
for j in enumerate(self.Bases):
for s in [1, 2, 4, 8]:
a = I64("sub rdx, [%s*%d + %s + 0x55aabbcc]" % (i[1], s, j[1]))
a.check_deref(1, self.IndicesInfo[i[0]], self.BasesInfo[j[0]], 64)
self.failIf(a.inst.operands[1].dispSize != 32)
self.failIf(a.inst.operands[1].disp != 0x55aabbcc)
if s != 1:
self.failIf(a.inst.operands[1].scale != s)
def test_base32(self):
I64("mov eax, [ebx]").check_simple_deref(1, Regs.EBX, 32)
class TestInstTable(unittest.TestCase):
""" Check that locate_inst algorithm covers all opcode-length (ol)
for the varying sizes of opcodes.
The bad tests should not find an instruction, so they should fail on purpose,
to see we don't crash the diassembler.
Also test for some end-cases with nop and wait. """
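    # Naming convention suggested by the byte patterns below (an assumption
    # about diStorm's internals): ol1/ol2/ol3 are 1/2/3-byte opcodes, a
    # trailing '3' marks opcodes extended by the ModR/M reg field, and a
    # trailing 'd' marks divided (FPU-style) opcode ranges.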
def test_ol1(self):
IB32("00c0").check_mnemonic("ADD")
def test_ol13(self):
IB32("80c055").check_mnemonic("ADD")
def test_ol1d(self):
IB32("d900").check_mnemonic("FLD")
IB32("d9c8").check_mnemonic("FXCH")
IB32("d9e1").check_mnemonic("FABS")
def test_ol2(self):
IB32("0f06").check_mnemonic("CLTS")
def test_ol23(self):
IB32("0fbae055").check_mnemonic("BT")
def test_ol2d(self):
IB32("0f01e0").check_mnemonic("SMSW")
IB32("0f0130").check_mnemonic("LMSW")
IB32("0f01c9").check_mnemonic("MWAIT")
def test_ol3(self):
IB32("0f380000").check_mnemonic("PSHUFB")
def test_ol1_bad(self):
        # The root table has no undefined instruction to test: every byte is either an instruction or a prefix.
pass
def test_ol13_bad(self):
IB32("f780").check_invalid()
def test_ol1d_bad(self):
IB32("d908").check_invalid()
IB32("d9d1").check_invalid()
IB32("d9ef").check_invalid()
def test_ol2_bad(self):
IB32("0fff").check_invalid()
def test_ol23_bad(self):
IB32("0f0dff").check_invalid()
def test_ol2d_bad(self):
IB32("0f0128").check_invalid()
IB32("0f01ca").check_invalid()
def test_ol3_bad(self):
IB32("0f0fff").check_invalid()
def test_index63(self):
        # Test arpl, since opcode 0x63 gets special treatment: ARPL in 32-bit mode, MOVSXD in 64-bit mode.
a = IB32("63c0")
a.check_mnemonic("ARPL")
a = IB64("63c0")
a.check_mnemonic("MOVSXD")
def test_index90(self):
        # A NOP prefixed with 0xF3 is PAUSE; with a REX.B prefix it becomes XCHG.
IB32("90").check_mnemonic("NOP")
IB64("90").check_mnemonic("NOP")
IB64("4890").check_mnemonic("NOP")
IB64("4190").check_mnemonic("XCHG")
IB64("f390").check_mnemonic("PAUSE")
def test_wait(self):
        # The wait instruction is tricky: it is coalesced with the following bytes
        # when they form a 'waitable' instruction; otherwise it stands alone.
IB32("9b90").check_mnemonic("WAIT", 0) # nop isn't waitable.
IB32("9bdfe0").check_mnemonic("FSTSW") # waitable stsw
IB32("dfe0").check_mnemonic("FNSTSW") # non-waitable stsw
IB32("9b00c0").check_mnemonic("WAIT") # add isn't waitable
IB32("9bd930").check_mnemonic("FSTENV") # waitable fstenv
IB32("9b66dbe3").check_mnemonic("WAIT") # prefix breaks waiting
def test_3dnow(self):
IB32("0f0fc00d").check_mnemonic("PI2FD")
IB32("0f0d00").check_mnemonic("PREFETCH")
def test_mandatory(self):
IB32("f30f10c0").check_mnemonic("MOVSS")
IB32("660f10c0").check_mnemonic("MOVUPD")
IB32("660f71d055").check_mnemonic("PSRLW")
IB32("660ffec0").check_mnemonic("PADDD")
IB32("f20f10c0").check_mnemonic("MOVSD")
IB32("f20f11c0").check_mnemonic("MOVSD")
IB32("660f3800c0").check_mnemonic("PSHUFB")
IB32("f20f38f0c0").check_mnemonic("CRC32")
IB32("660fc730").check_mnemonic("VMCLEAR")
IB32("f30fc730").check_mnemonic("VMXON")
def test_vex(self):
I32("vaddpd ymm1, ymm2, ymm2").check_mnemonic("VADDPD") # pre encoding: 66, 0f, 58
I32("vaddps ymm1, ymm2, ymm2").check_mnemonic("VADDPS") # pre encoding: 0f, 58
I32("vaddsd xmm1, xmm2, qword [eax]").check_mnemonic("VADDSD") # pre encoding: f2, 0f, 58
I32("vaddss xmm1, xmm2, dword [eax]").check_mnemonic("VADDSS") # pre encoding: f3, 0f, 58
I32("vmovsd xmm1, xmm2, xmm3").check_mnemonic("VMOVSD") # pre encoding: f2, 0f, 10
I32("vmovsd xmm1, qword [eax]").check_mnemonic("VMOVSD") # pre encoding: f2 0f 10 - but VEX.vvvv is not encoded!
        # Since a VEX prefix encodes the virtual (implied) escape prefix, there are
        # three ways to reach the 0f 38 table; verify that all of them decode correctly.
IB32("c4e279dbc2").check_mnemonic("VAESIMC") # pre encoding: 66, 0f, 38, db, virtual prefix is 0f 38
IB32("c4e17938dbc2").check_mnemonic("VAESIMC") # the virtual prefix is only 0f
IB32("c5f938dbc2").check_mnemonic("VAESIMC") # the virtual prefix is only 0f, but short VEX
# Same test as earlier, but for 0xf 0x3a, though this instruction doesn't have a short form.
IB32("c4e3710dc255").check_mnemonic("VBLENDPD") # pre encoding: 66, 0f, 3a, 0d, virtual prefix is 0f 3a
IB32("c4e1713a0dc255").check_mnemonic("VBLENDPD") # pre encoding: 66, 0f, 3a, 0d, virtual prefix is 0f
I32("vldmxcsr dword [eax]").check_mnemonic("VLDMXCSR")
I32("vzeroupper").check_mnemonic("VZEROUPPER")
I32("vzeroall").check_mnemonic("VZEROALL")
I32("vpslld xmm1, xmm2, xmm3").check_mnemonic("VPSLLD")
def test_vex_special(self):
        # VEX.vvvv is encoded where the VAESIMC instruction has no such operand, so decoding must fail.
IB32("c4e271dbca").check_invalid()
IB32("c4e2791800").check_mnemonic("VBROADCASTSS") # just to make sure this instruction is fine.
IB32("c4e279ff00").check_invalid() # pre encoding: 66, 0f, 38, ff
IB32("c4e179ff00").check_invalid() # pre encoding: 66, 0f, 38, ff, mmmmm = 1
IB32("c4e379ff00").check_invalid() # pre encoding: 66, 0f, 38, ff, mmmmm = 3
IB32("c4e4791800").check_invalid() # pre encoding: 66, 0f, 38, 18, mmmmm = 4
IB32("c5f8ae10").check_mnemonic("VLDMXCSR") # pre encoding: 0f, ae /02
IB32("c4c178ae10").check_mnemonic("VLDMXCSR") # longer form of 0f, ae /02
IB32("c4c179ae10").check_invalid() # longer form of 0f, ae /02, invalid pp=1
IB32("c4c17aae10").check_invalid() # longer form of 0f, ae /02, invalid pp=2
IB32("c4c17bae10").check_invalid() # longer form of 0f, ae /02, invalid pp=3
IB32("c4c17877").check_mnemonic("VZEROUPPER") # longer form of 0f, 77
IB32("c4c17c77").check_mnemonic("VZEROALL") # longer form of 0f, 77
IB32("c4c97c77").check_invalid() # longer form of 0f, 77, invalid mmmmm
def test_crc32(self):
I32("crc32 eax, al").check_reg(0, Regs.EAX, 32)
def test_lzcnt(self):
        # This is the only instruction that has a mandatory prefix and can ALSO take a valid operand-size prefix!
I32("lzcnt ax, bx").check_reg(0, Regs.AX, 16)
I32("lzcnt eax, ebx").check_reg(0, Regs.EAX, 32)
I64("lzcnt rax, rbx").check_reg(0, Regs.RAX, 64)
class TestAVXOperands(unittest.TestCase):
def test_rm32(self):
#I16("vextractps eax, xmm2, 3").check_reg(0, Regs.EAX, 32)
I32("vextractps eax, xmm2, 3").check_reg(0, Regs.EAX, 32)
I64("vextractps eax, xmm2, 3").check_reg(0, Regs.EAX, 32)
def test_reg32_64_m8(self):
#I16("vpextrb eax, xmm2, 3").check_reg(0, Regs.EAX, 32)
I32("vpextrb eax, xmm2, 3").check_reg(0, Regs.EAX, 32)
I64("vpextrb eax, xmm2, 3").check_reg(0, Regs.EAX, 32)
I64("vpextrb rax, xmm2, 3").check_reg(0, Regs.RAX, 64)
I32("vpextrb [ebx], xmm2, 3").check_simple_deref(0, Regs.EBX, 8)
I64("vpextrb [rbx], xmm2, 3").check_simple_deref(0, Regs.RBX, 8)
def test_reg32_64_m16(self):
I32("vpextrw eax, xmm2, 3").check_reg(0, Regs.EAX, 32)
I64("vpextrw rax, xmm2, 3").check_reg(0, Regs.RAX, 64)
I64("vpextrw rax, xmm2, 3").check_reg(0, Regs.RAX, 64)
I32("vpextrw [ebx], xmm2, 3").check_simple_deref(0, Regs.EBX, 16)
I64("vpextrw [rbx], xmm2, 3").check_simple_deref(0, Regs.RBX, 16)
def test_wreg32_64_WITH_wxmm32_64(self):
a = I32("vcvtss2si eax, xmm1")
a.check_reg(0, Regs.EAX, 32)
a.check_reg(1, Regs.XMM1, 128)
a = I64("vcvtss2si rax, [rbx]")
a.check_reg(0, Regs.RAX, 64)
a.check_simple_deref(1, Regs.RBX, 64)
a = I64("vcvtss2si eax, [rbx]")
a.check_reg(0, Regs.EAX, 32)
a.check_simple_deref(1, Regs.RBX, 32)
def test_vxmm(self):
I32("vaddsd xmm1, xmm2, xmm3").check_reg(1, Regs.XMM2, 128)
I64("vaddsd xmm2, xmm3, xmm4").check_reg(1, Regs.XMM3, 128)
def test_xmm_imm(self):
I32("vpblendvb xmm1, xmm2, xmm3, xmm4").check_reg(3, Regs.XMM4, 128)
        # Force XMM15, but the high bit is ignored in 32-bit mode, so XMM7 is used.
self.failIf(IB32("c4e3694ccbf0").inst.operands[3].index != Regs.XMM7)
I64("vpblendvb xmm1, xmm2, xmm3, xmm15").check_reg(3, Regs.XMM15, 128)
def test_yxmm(self):
I32("vaddsubpd ymm2, ymm4, ymm6").check_reg(0, Regs.YMM2, 256)
I32("vaddsubpd xmm7, xmm4, xmm6").check_reg(0, Regs.XMM7, 128)
I64("vaddsubpd ymm12, ymm4, ymm6").check_reg(0, Regs.YMM12, 256)
I64("vaddsubpd xmm14, xmm4, xmm6").check_reg(0, Regs.XMM14, 128)
def test_yxmm_imm(self):
I32("vblendvpd xmm1, xmm2, xmm3, xmm4").check_reg(3, Regs.XMM4, 128)
I32("vblendvpd ymm1, ymm2, ymm3, ymm4").check_reg(3, Regs.YMM4, 256)
        # Force YMM15, but the high bit is ignored in 32-bit mode, so YMM7 is used.
self.failIf(IB32("c4e36d4bcbf0").inst.operands[3].index != Regs.YMM7)
I64("vblendvpd xmm1, xmm2, xmm3, xmm14").check_reg(3, Regs.XMM14, 128)
I64("vblendvpd ymm1, ymm2, ymm3, ymm9").check_reg(3, Regs.YMM9, 256)
def test_ymm(self):
I32("vbroadcastsd ymm5, [eax]").check_reg(0, Regs.YMM5, 256)
I64("vbroadcastsd ymm13, [rax]").check_reg(0, Regs.YMM13, 256)
def test_ymm256(self):
I32("vperm2f128 ymm2, ymm4, [eax], 0x55").check_simple_deref(2, Regs.EAX, 256)
I64("vperm2f128 ymm2, ymm14, [rax], 0x55").check_simple_deref(2, Regs.RAX, 256)
def test_vymm(self):
I32("vinsertf128 ymm1, ymm4, xmm4, 0xaa").check_reg(1, Regs.YMM4, 256)
I64("vinsertf128 ymm1, ymm15, xmm4, 0xaa").check_reg(1, Regs.YMM15, 256)
def test_vyxmm(self):
I32("vmaxpd xmm1, xmm2, xmm3").check_reg(1, Regs.XMM2, 128)
I32("vmaxpd ymm1, ymm2, ymm3").check_reg(1, Regs.YMM2, 256)
I64("vmaxpd xmm1, xmm12, xmm3").check_reg(1, Regs.XMM12, 128)
I64("vmaxpd ymm1, ymm12, ymm3").check_reg(1, Regs.YMM12, 256)
def test_yxmm64_256(self):
I32("vmovddup xmm1, xmm2").check_reg(1, Regs.XMM2, 128)
I32("vmovddup ymm1, ymm2").check_reg(1, Regs.YMM2, 256)
I32("vmovddup xmm1, [ecx]").check_simple_deref(1, Regs.ECX, 64)
I32("vmovddup ymm1, [ebx]").check_simple_deref(1, Regs.EBX, 256)
I64("vmovddup xmm1, xmm12").check_reg(1, Regs.XMM12, 128)
I64("vmovddup ymm1, ymm12").check_reg(1, Regs.YMM12, 256)
I64("vmovddup xmm1, [rcx]").check_simple_deref(1, Regs.RCX, 64)
I64("vmovddup ymm1, [rbx]").check_simple_deref(1, Regs.RBX, 256)
def test_yxmm128_256(self):
I32("vandnpd xmm1, xmm2, xmm3").check_reg(2, Regs.XMM3, 128)
I32("vandnpd ymm1, ymm2, ymm3").check_reg(2, Regs.YMM3, 256)
I32("vandnpd xmm1, xmm2, [edi]").check_simple_deref(2, Regs.EDI, 128)
I32("vandnpd ymm1, ymm2, [esi]").check_simple_deref(2, Regs.ESI, 256)
I64("vandnpd xmm1, xmm2, xmm13").check_reg(2, Regs.XMM13, 128)
I64("vandnpd ymm1, ymm2, ymm13").check_reg(2, Regs.YMM13, 256)
I64("vandnpd xmm1, xmm2, [rdi]").check_simple_deref(2, Regs.RDI, 128)
I64("vandnpd ymm1, ymm2, [rsi]").check_simple_deref(2, Regs.RSI, 256)
def test_lxmm64_128(self):
I32("vcvtdq2pd xmm1, xmm2").check_reg(1, Regs.XMM2, 128)
I32("vcvtdq2pd xmm1, [eax]").check_simple_deref(1, Regs.EAX, 64)
I32("vcvtdq2pd ymm1, [ebx]").check_simple_deref(1, Regs.EBX, 128)
I64("vcvtdq2pd xmm1, xmm12").check_reg(1, Regs.XMM12, 128)
I64("vcvtdq2pd xmm1, [rax]").check_simple_deref(1, Regs.RAX, 64)
I64("vcvtdq2pd ymm1, [rbx]").check_simple_deref(1, Regs.RBX, 128)
def test_lmem128_256(self):
I32("vlddqu xmm1, [eax]").check_simple_deref(1, Regs.EAX, 128)
I32("vlddqu ymm1, [eax]").check_simple_deref(1, Regs.EAX, 256)
I64("vlddqu xmm1, [r14]").check_simple_deref(1, Regs.R14, 128)
I64("vlddqu ymm1, [r13]").check_simple_deref(1, Regs.R13, 256)
class TestMisc(unittest.TestCase):
def test_lods(self):
a = I16("lodsb")
a.check_reg(0, Regs.AL, 8)
a.check_simple_deref(1, Regs.SI, 8)
self.assertEqual(a.inst.isSegmentDefault, True)
a = I32("lodsw")
a.check_reg(0, Regs.AX, 16)
a.check_simple_deref(1, Regs.ESI, 16)
self.assertEqual(a.inst.isSegmentDefault, True)
a = I32("lodsd")
a.check_reg(0, Regs.EAX, 32)
a.check_simple_deref(1, Regs.ESI, 32)
self.assertEqual(a.inst.isSegmentDefault, True)
a = I64("lodsq")
a.check_reg(0, Regs.RAX, 64)
a.check_simple_deref(1, Regs.RSI, 64)
self.assertEqual(a.inst.isSegmentDefault, False)
a = I16("db 0x2e\nlodsb")
a.check_reg(0, Regs.AL, 8)
a.check_simple_deref(1, Regs.SI, 8)
self.assertEqual(a.inst.segment, Regs.CS)
self.assertEqual(a.inst.isSegmentDefault, False)
a = I32("db 0x2e\nlodsw")
a.check_reg(0, Regs.AX, 16)
a.check_simple_deref(1, Regs.ESI, 16)
self.assertEqual(a.inst.segment, Regs.CS)
self.assertEqual(a.inst.isSegmentDefault, False)
a = I32("db 0x2e\nlodsd")
a.check_reg(0, Regs.EAX, 32)
a.check_simple_deref(1, Regs.ESI, 32)
self.assertEqual(a.inst.segment, Regs.CS)
self.assertEqual(a.inst.isSegmentDefault, False)
a = I64("db 0x65\nlodsq")
a.check_reg(0, Regs.RAX, 64)
a.check_simple_deref(1, Regs.RSI, 64)
self.assertEqual(a.inst.segment, Regs.GS)
self.assertEqual(a.inst.isSegmentDefault, False)
def test_stos(self):
a = I16("stosb")
a.check_simple_deref(0, Regs.DI, 8)
a.check_reg(1, Regs.AL, 8)
self.assertEqual(a.inst.isSegmentDefault, True)
a = I32("stosw")
a.check_simple_deref(0, Regs.EDI, 16)
a.check_reg(1, Regs.AX, 16)
self.assertEqual(a.inst.isSegmentDefault, True)
a = I32("stosd")
a.check_simple_deref(0, Regs.EDI, 32)
a.check_reg(1, Regs.EAX, 32)
self.assertEqual(a.inst.isSegmentDefault, True)
a = I64("stosq")
a.check_simple_deref(0, Regs.RDI, 64)
a.check_reg(1, Regs.RAX, 64)
self.assertEqual(a.inst.isSegmentDefault, False)
a = I16("db 0x2e\nstosb")
a.check_simple_deref(0, Regs.DI, 8)
a.check_reg(1, Regs.AL, 8)
self.assertEqual(a.inst.unusedPrefixesMask, 1)
self.assertEqual(a.inst.segment, Regs.ES)
self.assertEqual(a.inst.isSegmentDefault, True)
a = I32("db 0x2e\nstosw")
a.check_simple_deref(0, Regs.EDI, 16)
a.check_reg(1, Regs.AX, 16)
self.assertEqual(a.inst.unusedPrefixesMask, 1)
self.assertEqual(a.inst.segment, Regs.ES)
self.assertEqual(a.inst.isSegmentDefault, True)
a = I32("db 0x2e\nstosd")
a.check_simple_deref(0, Regs.EDI, 32)
a.check_reg(1, Regs.EAX, 32)
self.assertEqual(a.inst.unusedPrefixesMask, 1)
self.assertEqual(a.inst.segment, Regs.ES)
self.assertEqual(a.inst.isSegmentDefault, True)
a = I64("db 0x65\nstosq")
a.check_simple_deref(0, Regs.RDI, 64)
a.check_reg(1, Regs.RAX, 64)
self.assertEqual(a.inst.unusedPrefixesMask, 1)
self.assertEqual(a.inst.segment, REG_NONE)
def test_scas(self):
a = I16("scasb")
a.check_simple_deref(0, Regs.DI, 8)
a.check_reg(1, Regs.AL, 8)
self.assertEqual(a.inst.isSegmentDefault, True)
a = I32("scasw")
a.check_simple_deref(0, Regs.EDI, 16)
a.check_reg(1, Regs.AX, 16)
self.assertEqual(a.inst.isSegmentDefault, True)
a = I32("scasd")
a.check_simple_deref(0, Regs.EDI, 32)
a.check_reg(1, Regs.EAX, 32)
self.assertEqual(a.inst.isSegmentDefault, True)
a = I64("scasq")
a.check_simple_deref(0, Regs.RDI, 64)
a.check_reg(1, Regs.RAX, 64)
self.assertEqual(a.inst.isSegmentDefault, False)
a = I16("db 0x2e\nscasb")
a.check_simple_deref(0, Regs.DI, 8)
a.check_reg(1, Regs.AL, 8)
self.assertEqual(a.inst.unusedPrefixesMask, 1)
self.assertEqual(a.inst.segment, Regs.ES)
self.assertEqual(a.inst.isSegmentDefault, True)
a = I32("db 0x2e\nscasw")
a.check_simple_deref(0, Regs.EDI, 16)
a.check_reg(1, Regs.AX, 16)
self.assertEqual(a.inst.unusedPrefixesMask, 1)
self.assertEqual(a.inst.segment, Regs.ES)
self.assertEqual(a.inst.isSegmentDefault, True)
a = I32("db 0x2e\nscasd")
a.check_simple_deref(0, Regs.EDI, 32)
a.check_reg(1, Regs.EAX, 32)
self.assertEqual(a.inst.unusedPrefixesMask, 1)
self.assertEqual(a.inst.segment, Regs.ES)
self.assertEqual(a.inst.isSegmentDefault, True)
a = I64("db 0x65\nscasq")
a.check_simple_deref(0, Regs.RDI, 64)
a.check_reg(1, Regs.RAX, 64)
self.assertEqual(a.inst.unusedPrefixesMask, 1)
self.assertEqual(a.inst.segment, REG_NONE)
def test_cmps(self):
a = I64("cmpsd")
a.check_simple_deref(0, Regs.RSI, 32)
a.check_simple_deref(1, Regs.RDI, 32)
self.assertEqual(a.inst.unusedPrefixesMask, 0)
self.assertEqual(a.inst.segment, REG_NONE)
a = I16("db 0x2e\ncmpsb")
a.check_simple_deref(0, Regs.SI, 8)
a.check_simple_deref(1, Regs.DI, 8)
self.assertEqual(a.inst.unusedPrefixesMask, 0)
self.assertEqual(a.inst.segment, Regs.CS)
self.assertEqual(a.inst.isSegmentDefault, False)
def test_movs(self):
a = I32("movsd")
a.check_simple_deref(0, Regs.EDI, 32)
a.check_simple_deref(1, Regs.ESI, 32)
self.assertEqual(a.inst.unusedPrefixesMask, 0)
self.assertEqual(a.inst.segment, Regs.DS)
self.assertEqual(a.inst.isSegmentDefault, True)
a = I32("db 0x2e\nmovsw")
a.check_simple_deref(0, Regs.EDI, 16)
a.check_simple_deref(1, Regs.ESI, 16)
self.assertEqual(a.inst.unusedPrefixesMask, 0)
self.assertEqual(a.inst.segment, Regs.CS)
self.assertEqual(a.inst.isSegmentDefault, False)
def test_ins(self):
a = I32("db 0x65\ninsw")
a.check_simple_deref(0, Regs.EDI, 16)
a.check_reg(1, Regs.DX, 16)
self.assertEqual(a.inst.unusedPrefixesMask, 1)
self.assertEqual(a.inst.segment, Regs.ES)
self.assertEqual(a.inst.isSegmentDefault, True)
def test_outs(self):
a = I64("db 0x65\noutsd")
a.check_reg(0, Regs.DX, 16)
a.check_simple_deref(1, Regs.RSI, 32)
self.assertEqual(a.inst.segment, Regs.GS)
self.assertEqual(a.inst.isSegmentDefault, False)
def test_branch_hints(self):
self.failIf("FLAG_HINT_TAKEN" not in I32("db 0x3e\n jnz 0x50").inst.flags)
self.failIf("FLAG_HINT_NOT_TAKEN" not in I32("db 0x2e\n jp 0x55").inst.flags)
self.failIf("FLAG_HINT_NOT_TAKEN" not in I32("db 0x2e\n jo 0x55000").inst.flags)
self.failIf(I32("db 0x2e\n loop 0x55").inst.rawFlags & 0x1f, 0)
def test_mnemonic_by_vexw(self):
I32("vmovd xmm1, eax").check_mnemonic("VMOVD")
I64("vmovd xmm1, eax").check_reg(1, Regs.EAX, 32)
a = I64("vmovq xmm1, rax")
a.check_mnemonic("VMOVQ")
a.check_reg(1, Regs.RAX, 64)
def test_vexl_ignored(self):
I32("vaesdeclast xmm1, xmm2, xmm3").check_reg(0, Regs.XMM1, 128)
IB32("c4e26ddfcb").check_mnemonic("VAESDECLAST")
IB64("c4e26ddfcb").check_mnemonic("VAESDECLAST")
def test_vexl_needed(self):
I32("vinsertf128 ymm1, ymm2, xmm4, 0x42").check_mnemonic("VINSERTF128")
IB32("c4e36918cc42").check_invalid() # Without VEX.L.
IB64("c4e36918cc42").check_invalid() # Without VEX.L.
def test_force_reg0(self):
I32("extrq xmm1, 0x55, 0x66").check_mnemonic("EXTRQ")
I64("extrq xmm14, 0x55, 0x66").check_reg(0, Regs.XMM14, 128)
def test_pause(self):
self.assertEqual(I16("pause").inst.size, 2)
self.assertEqual(I32("pause").inst.size, 2)
self.assertEqual(I64("pause").inst.size, 2)
def test_nop(self):
self.assertEqual(I16("db 0x90").inst.size, 1)
self.assertEqual(I32("db 0x90").inst.size, 1)
self.assertEqual(I64("db 0x90").inst.size, 1)
self.assertEqual(I64("db 0x48, 0x90").inst.size, 2)
# XCHG R8D, EAX
a = I64("db 0x41, 0x90")
a.check_reg(0, Regs.R8D, 32)
a.check_reg(1, Regs.EAX, 32)
# XCHG R8, RAX
a = I64("db 0x49, 0x90")
a.check_reg(0, Regs.R8, 64)
a.check_reg(1, Regs.RAX, 64)
a = I64("db 0x4f, 0x90")
a.check_reg(0, Regs.R8, 64)
a.check_reg(1, Regs.RAX, 64)
def test_3dnow(self):
I32("pfadd mm4, [eax]").check_reg(0, Regs.MM4, 64)
I32("pfsub mm5, [eax]").check_reg(0, Regs.MM5, 64)
I32("pfrcpit1 mm1, [ebx]").check_mnemonic("PFRCPIT1")
I64("pavgusb mm1, mm2").check_mnemonic("PAVGUSB")
def test_all_segs(self):
I16("push fs").check_reg(0, Regs.FS, 16)
I16("push gs").check_reg(0, Regs.GS, 16)
I16("push ds").check_reg(0, Regs.DS, 16)
I16("push cs").check_reg(0, Regs.CS, 16)
I16("push ds").check_reg(0, Regs.DS, 16)
I16("push es").check_reg(0, Regs.ES, 16)
def test_op4(self):
a = I32("insertq xmm2, xmm1, 0x55, 0xaa")
a.check_reg(0, Regs.XMM2, 128)
a.check_reg(1, Regs.XMM1, 128)
a.check_type_size(2, distorm3.OPERAND_IMMEDIATE, 8)
self.assertEqual(a.inst.operands[2].value, 0x55)
a.check_type_size(3, distorm3.OPERAND_IMMEDIATE, 8)
self.assertEqual(a.inst.operands[3].value, 0xaa)
def test_pseudo_cmp(self):
I32("cmpps xmm2, xmm3, 0x7")
I64("cmpps xmm2, xmm4, 0x2")
def test_jmp_counters(self):
I16("jcxz 0x100")
I32("jecxz 0x100")
I64("jrcxz 0x100")
def test_natives(self):
self.assertEqual(I16("pusha").inst.size, 1)
self.assertEqual(I16("pushad").inst.size, 2)
self.assertEqual(I32("pusha").inst.size, 1)
self.assertEqual(I32("pushaw").inst.size, 2)
self.assertEqual(I16("CBW").inst.size, 1)
self.assertEqual(I32("CWDE").inst.size, 1)
self.assertEqual(I64("CDQE").inst.size, 2)
def test_modrm_based(self):
I32("movhlps xmm0, xmm1")
I32("movhps xmm0, [eax]")
I64("movhlps xmm0, xmm1")
I64("movhps xmm0, [eax]")
I64("movhlps xmm0, xmm1")
I64("movlps xmm0, [eax]")
def test_wait(self):
self.assertEqual(I16("wait").inst.size, 1)
def test_include_wait(self):
self.assertEqual(I16("db 0x9b\n db 0xd9\n db 0x30").inst.size, 3)
def test_loopxx_counters_size(self):
a = I16("loopz 0x50")
a.check_type_size(0,distorm3.OPERAND_IMMEDIATE, 8)
a.check_addr_size(16)
a = I32("loopz 0x50")
a.check_type_size(0,distorm3.OPERAND_IMMEDIATE, 8)
a.check_addr_size(32)
a = I64("loopz 0x50")
a.check_type_size(0,distorm3.OPERAND_IMMEDIATE, 8)
a.check_addr_size(64)
a = I16("db 0x67\n loopz 0x50")
a.check_type_size(0,distorm3.OPERAND_IMMEDIATE, 8)
a.check_addr_size(32)
a = I32("db 0x67\n loopz 0x50")
a.check_type_size(0,distorm3.OPERAND_IMMEDIATE, 8)
a.check_addr_size(16)
a = I64("db 0x67\n loopnz 0x50")
a.check_type_size(0,distorm3.OPERAND_IMMEDIATE, 8)
a.check_addr_size(32)
class TestPrefixes(unittest.TestCase):
Derefs16 = ["BX + SI", "BX + DI", "BP + SI", "BP + DI", "SI", "DI", "BP", "BX"]
Derefs32 = ["EAX", "ECX", "EDX", "EBX", "EBP", "ESI", "EDI"]
Bases = ["EAX", "ECX", "EDX", "EBX", "ESP", "ESI", "EDI"]
def test_without_seg(self):
self.assertEqual(I64("and [rip+0X5247], ch").inst.segment, REG_NONE)
self.assertEqual(I32("mov eax, [ebp*4]").inst.segment, Regs.DS)
self.assertEqual(I32("mov eax, [eax*4+ebp]").inst.segment, Regs.SS)
def test_default_seg16(self):
a = I16("mov [ds:0x1234], ax")
self.assertEqual(a.inst.segment, Regs.DS)
self.assertEqual(a.inst.isSegmentDefault, 1)
a = I16("mov [cs:0x1234], ax")
self.assertEqual(a.inst.segment, Regs.CS)
self.assertEqual(a.inst.isSegmentDefault, False)
def test_default_seg16_all(self):
for i in ["ADD [ds:%s], AX" % i for i in self.Derefs16]:
a = I16(i)
self.assertEqual(a.inst.segment, Regs.DS)
if i[8:10] == "BP":
self.assertEqual(a.inst.isSegmentDefault, False)
else:
self.assertEqual(a.inst.isSegmentDefault, True)
# Test with disp8
for i in ["ADD [ds:%s + 0x55], AX" % i for i in self.Derefs16]:
a = I16(i)
self.assertEqual(a.inst.segment, Regs.DS)
if i[8:10] == "BP":
self.assertEqual(a.inst.isSegmentDefault, False)
else:
self.assertEqual(a.inst.isSegmentDefault, True)
def test_default_seg32(self):
self.assertEqual(I32("mov [ds:0x12345678], eax").inst.segment, Regs.DS)
self.assertEqual(I32("mov [cs:0x12345678], eax").inst.segment, Regs.CS)
texts = ["ADD [ds:%s], EAX" % i for i in self.Derefs32]
for i in enumerate(texts):
a = I32(i[1])
self.assertEqual(a.inst.segment, Regs.DS)
if self.Derefs32[i[0]] == "EBP":
self.assertEqual(a.inst.isSegmentDefault, False)
else:
self.assertEqual(a.inst.isSegmentDefault, True)
# Test with disp8
texts = ["ADD [ds:%s + 0x55], EAX" % i for i in self.Derefs32]
for i in enumerate(texts):
a = I32(i[1])
self.assertEqual(a.inst.segment, Regs.DS)
if self.Derefs32[i[0]] == "EBP":
self.assertEqual(a.inst.isSegmentDefault, False)
else:
self.assertEqual(a.inst.isSegmentDefault, True)
def test_sib(self):
for i in enumerate(self.Derefs32):
for j in enumerate(self.Bases):
for s in [1, 2, 4, 8]:
a = I32("cmp ebp, [ds:%s*%d + %s]" % (i[1], s, j[1]))
a2 = I32("cmp ebp, [ds:%s*%d + %s + 0x55]" % (i[1], s, j[1]))
self.assertEqual(a.inst.segment, Regs.DS)
self.assertEqual(a2.inst.segment, Regs.DS)
if (j[1] == "EBP" or j[1] == "ESP"):
self.assertEqual(a.inst.isSegmentDefault, False)
self.assertEqual(a2.inst.isSegmentDefault, False)
else:
self.assertEqual(a.inst.isSegmentDefault, True)
self.assertEqual(a2.inst.isSegmentDefault, True)
def test_seg64(self):
self.assertEqual(I64("mov [gs:rip+0x12345678], eax").inst.segment, Regs.GS)
self.assertEqual(I64("mov [fs:0x12345678], eax").inst.segment, Regs.FS)
def test_lock(self):
self.failIf("FLAG_LOCK" not in I32("lock inc dword [eax]").inst.flags)
def test_repnz(self):
self.failIf("FLAG_REPNZ" not in I32("repnz scasb").inst.flags)
def test_rep(self):
self.failIf("FLAG_REP" not in I32("rep movsb").inst.flags)
def test_segment_override(self):
self.assertEqual(I32("mov eax, [cs:eax]").inst.segment, Regs.CS)
self.assertEqual(I32("mov eax, [ds:eax]").inst.segment, Regs.DS)
self.assertEqual(I32("mov eax, [es:eax]").inst.segment, Regs.ES)
self.assertEqual(I32("mov eax, [ss:eax]").inst.segment, Regs.SS)
self.assertEqual(I32("mov eax, [fs:eax]").inst.segment, Regs.FS)
self.assertEqual(I32("mov eax, [gs:eax]").inst.segment, Regs.GS)
def test_unused_normal(self):
self.assertEqual(IB64("4090").inst.unusedPrefixesMask, 1)
self.assertEqual(IB64("6790").inst.unusedPrefixesMask, 1)
self.assertEqual(IB64("6690").inst.unusedPrefixesMask, 1)
self.assertEqual(IB64("f290").inst.unusedPrefixesMask, 1)
self.assertEqual(IB64("f090").inst.unusedPrefixesMask, 1)
self.assertEqual(IB64("f3c3").inst.unusedPrefixesMask, 1)
self.assertEqual(IB64("64c3").inst.unusedPrefixesMask, 1)
def test_unused_doubles(self):
self.assertEqual(IB64("404090").inst.unusedPrefixesMask, 3)
self.assertEqual(IB64("676790").inst.unusedPrefixesMask, 3)
self.assertEqual(IB64("666690").inst.unusedPrefixesMask, 3)
self.assertEqual(IB64("f2f290").inst.unusedPrefixesMask, 3)
self.assertEqual(IB64("f0f090").inst.unusedPrefixesMask, 3)
self.assertEqual(IB64("f3f3c3").inst.unusedPrefixesMask, 3)
self.assertEqual(IB64("642ec3").inst.unusedPrefixesMask, 3)
def test_unused_sequences(self):
self.assertEqual(len(IB64("66"*15).insts), 15)
r = int(random.random() * 14)
self.assertEqual(IB64("66"*r + "90").inst.unusedPrefixesMask, (1 << r) - 1)
def test_rexw_66(self):
self.assertEqual(IB64("6648ffc0").inst.unusedPrefixesMask, 1)
self.assertEqual(IB64("6640ffc0").inst.unusedPrefixesMask, 2)
self.assertEqual(IB64("48660f10c0").inst.unusedPrefixesMask, 1)
self.assertEqual(IB64("664f0f10c0").inst.unusedPrefixesMask, 0)
class TestInvalid(unittest.TestCase):
def align(self):
for i in xrange(15):
IB32("90")
def test_filter_mem(self):
#cmpxchg8b eax
IB32("0fc7c8")
self.align()
def test_drop_prefixes(self):
# Drop prefixes when we encounter an instruction that couldn't be decoded.
IB32("666764ffff")
self.align()
def test_zzz_must_be_last_drop_prefixes(self):
# Drop prefixes when the last byte in stream is a prefix.
IB32("66")
class FlowControl:
""" The flow control instruction will be flagged in the lo nibble of the 'meta' field in _InstInfo of diStorm.
They are used to distinguish between flow control instructions (such as: ret, call, jmp, jz, etc) to normal ones. """
(CALL,
RET,
SYS,
BRANCH,
COND_BRANCH,
INT) = range(1, 7)
DF_MAXIMUM_ADDR16 = 1
DF_MAXIMUM_ADDR32 = 2
DF_RETURN_FC_ONLY = 4
DF_STOP_ON_CALL = 8
DF_STOP_ON_RET = 0x10
DF_STOP_ON_SYS = 0x20
DF_STOP_ON_BRANCH = 0x40
DF_STOP_ON_COND_BRANCH = 0x80
DF_STOP_ON_INT = 0x100
DF_STOP_ON_FLOW_CONTROL = (DF_STOP_ON_CALL | DF_STOP_ON_RET | DF_STOP_ON_SYS | DF_STOP_ON_BRANCH | DF_STOP_ON_COND_BRANCH | DF_STOP_ON_INT)
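# A minimal sketch of how these feature flags are combined and passed to the
# decoder (assuming the distorm3.Decompose binding accepts a 'features'
# argument, as the TestFeatures cases below imply):
#
#   code = "744fc3".decode("hex")  # JZ +0x4f; RET
#   insts = Decompose(0, code, Decode32Bits,
#                     DF_RETURN_FC_ONLY | DF_STOP_ON_FLOW_CONTROL)
#   # Only the JZ would be returned, and decoding would stop there.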
class TestFeatures(unittest.TestCase):
def test_addr16(self):
#I16("mov [-4], bx", 0, DF_MAXIMUM_ADDR16).check_disp(0, 0xfffc, 16, 16)
pass
def test_add32(self):
pass
def test_fc(self):
pairs = [
(["INT 5", "db 0xf1", "INT 3", "INTO", "UD2"], FlowControl.INT),
(["CALL 0x50", "CALL FAR [ebx]"], FlowControl.CALL),
(["RET", "IRET", "RETF"], FlowControl.RET),
(["SYSCALL", "SYSENTER", "SYSRET", "SYSEXIT"], FlowControl.SYS),
(["JMP 0x50", "JMP FAR [ebx]"], FlowControl.BRANCH),
(["JCXZ 0x50", "JO 0x50", "JNO 0x50", "JB 0x50", "JAE 0x50",
"JZ 0x50", "JNZ 0x50", "JBE 0x50", "JA 0x50", "JS 0x50",
"JNS 0x50", "JP 0x50", "JNP 0x50", "JL 0x50", "JGE 0x50",
"JLE 0x50", "JG 0x50", "LOOP 0x50", "LOOPZ 0x50", "LOOPNZ 0x50"], FlowControl.COND_BRANCH)
]
for i in pairs:
for j in i[0]:
a = I32(j + "\nnop", DF_STOP_ON_FLOW_CONTROL)
self.assertEqual(len(a.insts), 1)
self.assertEqual(a.inst["meta"] & 7, i[1])
a = I32("push eax\nnop\n" + j, DF_RETURN_FC_ONLY)
self.assertEqual(len(a.insts), 1)
a = I32("nop\nxor eax, eax\n" + j + "\ninc eax", DF_RETURN_FC_ONLY | DF_STOP_ON_FLOW_CONTROL)
self.assertEqual(len(a.insts), 1)
def test_filter(self):
pass
def GetNewSuite(className):
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(className))
return suite
def initfiles():
for i in ["bin16", "bin32", "bin64"]:
fbin.append(open("build\\linux\\"+i, "wb"))
if __name__ == "__main__":
random.seed()
#initfiles() # Used to emit the bytes of the tests - useful for code coverage input.
suite = unittest.TestSuite()
suite.addTest(GetNewSuite(TestMode16))
suite.addTest(GetNewSuite(TestMode32))
suite.addTest(GetNewSuite(TestMode64))
suite.addTest(GetNewSuite(TestInstTable))
suite.addTest(GetNewSuite(TestAVXOperands))
suite.addTest(GetNewSuite(TestMisc))
suite.addTest(GetNewSuite(TestPrefixes))
#suite.addTest(GetNewSuite(TestInvalid))
#suite.addTest(GetNewSuite(TestFeatures))
unittest.TextTestRunner(verbosity=1).run(suite)
|
mit
|
minhphung171093/GreenERP_V9
|
openerp/addons/sale_service/__openerp__.py
|
23
|
1356
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Create Tasks from SO',
'version': '1.0',
'category': 'Project Management',
'description': """
Automatically creates project tasks from procurement lines.
===========================================================
This module automatically creates a new task for each procurement order line
(e.g. for sale order lines) if the corresponding product meets the following
characteristics:
* Product Type = Service
* Create Task Automatically = True
If on top of that a project is specified on the product form (in the Procurement
tab), then the new task will be created in that specific project. Otherwise, the
new task will not belong to any project, and may be added to a project manually
later.
When the project task is completed or cancelled, the corresponding procurement
is updated accordingly. For example, if this procurement corresponds to a sale
order line, the sale order line will be considered delivered when the task is
completed.
""",
'website': 'https://www.odoo.com/page/crm',
'depends': ['project', 'sale', 'project_timesheet', 'sale_timesheet'],
'data': ['views/sale_service_view.xml'],
'demo': ['demo/sale_service_demo.xml'],
'installable': True,
'auto_install': True,
}
|
gpl-3.0
|
fidomason/kbengine
|
kbe/src/lib/python/Lib/encodings/cp037.py
|
266
|
13121
|
""" Python Character Mapping Codec cp037 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP037.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp037',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x9c' # 0x04 -> CONTROL
'\t' # 0x05 -> HORIZONTAL TABULATION
'\x86' # 0x06 -> CONTROL
'\x7f' # 0x07 -> DELETE
'\x97' # 0x08 -> CONTROL
'\x8d' # 0x09 -> CONTROL
'\x8e' # 0x0A -> CONTROL
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x9d' # 0x14 -> CONTROL
'\x85' # 0x15 -> CONTROL
'\x08' # 0x16 -> BACKSPACE
'\x87' # 0x17 -> CONTROL
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x92' # 0x1A -> CONTROL
'\x8f' # 0x1B -> CONTROL
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
'\x80' # 0x20 -> CONTROL
'\x81' # 0x21 -> CONTROL
'\x82' # 0x22 -> CONTROL
'\x83' # 0x23 -> CONTROL
'\x84' # 0x24 -> CONTROL
'\n' # 0x25 -> LINE FEED
'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
'\x1b' # 0x27 -> ESCAPE
'\x88' # 0x28 -> CONTROL
'\x89' # 0x29 -> CONTROL
'\x8a' # 0x2A -> CONTROL
'\x8b' # 0x2B -> CONTROL
'\x8c' # 0x2C -> CONTROL
'\x05' # 0x2D -> ENQUIRY
'\x06' # 0x2E -> ACKNOWLEDGE
'\x07' # 0x2F -> BELL
'\x90' # 0x30 -> CONTROL
'\x91' # 0x31 -> CONTROL
'\x16' # 0x32 -> SYNCHRONOUS IDLE
'\x93' # 0x33 -> CONTROL
'\x94' # 0x34 -> CONTROL
'\x95' # 0x35 -> CONTROL
'\x96' # 0x36 -> CONTROL
'\x04' # 0x37 -> END OF TRANSMISSION
'\x98' # 0x38 -> CONTROL
'\x99' # 0x39 -> CONTROL
'\x9a' # 0x3A -> CONTROL
'\x9b' # 0x3B -> CONTROL
'\x14' # 0x3C -> DEVICE CONTROL FOUR
'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
'\x9e' # 0x3E -> CONTROL
'\x1a' # 0x3F -> SUBSTITUTE
' ' # 0x40 -> SPACE
'\xa0' # 0x41 -> NO-BREAK SPACE
'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
'\xa2' # 0x4A -> CENT SIGN
'.' # 0x4B -> FULL STOP
'<' # 0x4C -> LESS-THAN SIGN
'(' # 0x4D -> LEFT PARENTHESIS
'+' # 0x4E -> PLUS SIGN
'|' # 0x4F -> VERTICAL LINE
'&' # 0x50 -> AMPERSAND
'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
'!' # 0x5A -> EXCLAMATION MARK
'$' # 0x5B -> DOLLAR SIGN
'*' # 0x5C -> ASTERISK
')' # 0x5D -> RIGHT PARENTHESIS
';' # 0x5E -> SEMICOLON
'\xac' # 0x5F -> NOT SIGN
'-' # 0x60 -> HYPHEN-MINUS
'/' # 0x61 -> SOLIDUS
'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
'\xa6' # 0x6A -> BROKEN BAR
',' # 0x6B -> COMMA
'%' # 0x6C -> PERCENT SIGN
'_' # 0x6D -> LOW LINE
'>' # 0x6E -> GREATER-THAN SIGN
'?' # 0x6F -> QUESTION MARK
'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
'`' # 0x79 -> GRAVE ACCENT
':' # 0x7A -> COLON
'#' # 0x7B -> NUMBER SIGN
'@' # 0x7C -> COMMERCIAL AT
"'" # 0x7D -> APOSTROPHE
'=' # 0x7E -> EQUALS SIGN
'"' # 0x7F -> QUOTATION MARK
'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
'a' # 0x81 -> LATIN SMALL LETTER A
'b' # 0x82 -> LATIN SMALL LETTER B
'c' # 0x83 -> LATIN SMALL LETTER C
'd' # 0x84 -> LATIN SMALL LETTER D
'e' # 0x85 -> LATIN SMALL LETTER E
'f' # 0x86 -> LATIN SMALL LETTER F
'g' # 0x87 -> LATIN SMALL LETTER G
'h' # 0x88 -> LATIN SMALL LETTER H
'i' # 0x89 -> LATIN SMALL LETTER I
'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
'\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
'\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
'\xb1' # 0x8F -> PLUS-MINUS SIGN
'\xb0' # 0x90 -> DEGREE SIGN
'j' # 0x91 -> LATIN SMALL LETTER J
'k' # 0x92 -> LATIN SMALL LETTER K
'l' # 0x93 -> LATIN SMALL LETTER L
'm' # 0x94 -> LATIN SMALL LETTER M
'n' # 0x95 -> LATIN SMALL LETTER N
'o' # 0x96 -> LATIN SMALL LETTER O
'p' # 0x97 -> LATIN SMALL LETTER P
'q' # 0x98 -> LATIN SMALL LETTER Q
'r' # 0x99 -> LATIN SMALL LETTER R
'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
'\xb8' # 0x9D -> CEDILLA
'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
'\xa4' # 0x9F -> CURRENCY SIGN
'\xb5' # 0xA0 -> MICRO SIGN
'~' # 0xA1 -> TILDE
's' # 0xA2 -> LATIN SMALL LETTER S
't' # 0xA3 -> LATIN SMALL LETTER T
'u' # 0xA4 -> LATIN SMALL LETTER U
'v' # 0xA5 -> LATIN SMALL LETTER V
'w' # 0xA6 -> LATIN SMALL LETTER W
'x' # 0xA7 -> LATIN SMALL LETTER X
'y' # 0xA8 -> LATIN SMALL LETTER Y
'z' # 0xA9 -> LATIN SMALL LETTER Z
'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
'\xbf' # 0xAB -> INVERTED QUESTION MARK
'\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
'\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
'\xae' # 0xAF -> REGISTERED SIGN
'^' # 0xB0 -> CIRCUMFLEX ACCENT
'\xa3' # 0xB1 -> POUND SIGN
'\xa5' # 0xB2 -> YEN SIGN
'\xb7' # 0xB3 -> MIDDLE DOT
'\xa9' # 0xB4 -> COPYRIGHT SIGN
'\xa7' # 0xB5 -> SECTION SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
'[' # 0xBA -> LEFT SQUARE BRACKET
']' # 0xBB -> RIGHT SQUARE BRACKET
'\xaf' # 0xBC -> MACRON
'\xa8' # 0xBD -> DIAERESIS
'\xb4' # 0xBE -> ACUTE ACCENT
'\xd7' # 0xBF -> MULTIPLICATION SIGN
'{' # 0xC0 -> LEFT CURLY BRACKET
'A' # 0xC1 -> LATIN CAPITAL LETTER A
'B' # 0xC2 -> LATIN CAPITAL LETTER B
'C' # 0xC3 -> LATIN CAPITAL LETTER C
'D' # 0xC4 -> LATIN CAPITAL LETTER D
'E' # 0xC5 -> LATIN CAPITAL LETTER E
'F' # 0xC6 -> LATIN CAPITAL LETTER F
'G' # 0xC7 -> LATIN CAPITAL LETTER G
'H' # 0xC8 -> LATIN CAPITAL LETTER H
'I' # 0xC9 -> LATIN CAPITAL LETTER I
'\xad' # 0xCA -> SOFT HYPHEN
'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
'}' # 0xD0 -> RIGHT CURLY BRACKET
'J' # 0xD1 -> LATIN CAPITAL LETTER J
'K' # 0xD2 -> LATIN CAPITAL LETTER K
'L' # 0xD3 -> LATIN CAPITAL LETTER L
'M' # 0xD4 -> LATIN CAPITAL LETTER M
'N' # 0xD5 -> LATIN CAPITAL LETTER N
'O' # 0xD6 -> LATIN CAPITAL LETTER O
'P' # 0xD7 -> LATIN CAPITAL LETTER P
'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
'R' # 0xD9 -> LATIN CAPITAL LETTER R
'\xb9' # 0xDA -> SUPERSCRIPT ONE
'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
'\\' # 0xE0 -> REVERSE SOLIDUS
'\xf7' # 0xE1 -> DIVISION SIGN
'S' # 0xE2 -> LATIN CAPITAL LETTER S
'T' # 0xE3 -> LATIN CAPITAL LETTER T
'U' # 0xE4 -> LATIN CAPITAL LETTER U
'V' # 0xE5 -> LATIN CAPITAL LETTER V
'W' # 0xE6 -> LATIN CAPITAL LETTER W
'X' # 0xE7 -> LATIN CAPITAL LETTER X
'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
'\xb2' # 0xEA -> SUPERSCRIPT TWO
'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
'0' # 0xF0 -> DIGIT ZERO
'1' # 0xF1 -> DIGIT ONE
'2' # 0xF2 -> DIGIT TWO
'3' # 0xF3 -> DIGIT THREE
'4' # 0xF4 -> DIGIT FOUR
'5' # 0xF5 -> DIGIT FIVE
'6' # 0xF6 -> DIGIT SIX
'7' # 0xF7 -> DIGIT SEVEN
'8' # 0xF8 -> DIGIT EIGHT
'9' # 0xF9 -> DIGIT NINE
'\xb3' # 0xFA -> SUPERSCRIPT THREE
'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
lgpl-3.0
|
luistorresm/odoo
|
addons/email_template/email_template.py
|
196
|
30189
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009 Sharoon Thomas
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import base64
import datetime
import dateutil.relativedelta as relativedelta
import logging
import lxml
import urlparse
import openerp
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp import tools, api
from openerp.tools.translate import _
from urllib import urlencode, quote as quote
_logger = logging.getLogger(__name__)
def format_tz(pool, cr, uid, dt, tz=False, format=False, context=None):
context = dict(context or {})
if tz:
context['tz'] = tz or pool.get('res.users').read(cr, SUPERUSER_ID, uid, ['tz'])['tz'] or "UTC"
timestamp = datetime.datetime.strptime(dt, tools.DEFAULT_SERVER_DATETIME_FORMAT)
ts = fields.datetime.context_timestamp(cr, uid, timestamp, context)
if format:
return ts.strftime(format)
else:
lang = context.get("lang")
lang_params = {}
if lang:
res_lang = pool.get('res.lang')
ids = res_lang.search(cr, uid, [("code", "=", lang)])
if ids:
lang_params = res_lang.read(cr, uid, ids[0], ["date_format", "time_format"])
format_date = lang_params.get("date_format", '%B-%d-%Y')
format_time = lang_params.get("time_format", '%I-%M %p')
fdate = ts.strftime(format_date)
ftime = ts.strftime(format_time)
return "%s %s%s" % (fdate, ftime, (' (%s)' % tz) if tz else '')
try:
# We use a jinja2 sandboxed environment to render mako templates.
# Note that the rendering does not cover all the mako syntax, in particular
# arbitrary Python statements are not accepted, and not all expressions are
# allowed: only "public" attributes (not starting with '_') of objects may
# be accessed.
# This is done on purpose: it prevents accidental or malicious execution of
# Python code that may break the security of the server.
from jinja2.sandbox import SandboxedEnvironment
mako_template_env = SandboxedEnvironment(
block_start_string="<%",
block_end_string="%>",
variable_start_string="${",
variable_end_string="}",
comment_start_string="<%doc>",
comment_end_string="</%doc>",
line_statement_prefix="%",
line_comment_prefix="##",
trim_blocks=True, # do not output newline after blocks
autoescape=True, # XML/HTML automatic escaping
)
mako_template_env.globals.update({
'str': str,
'quote': quote,
'urlencode': urlencode,
'datetime': datetime,
'len': len,
'abs': abs,
'min': min,
'max': max,
'sum': sum,
'filter': filter,
'reduce': reduce,
'map': map,
'round': round,
# dateutil.relativedelta is an old-style class and cannot be directly
# instantiated within a jinja2 expression, so a lambda "proxy" is
# needed, apparently.
'relativedelta': lambda *a, **kw : relativedelta.relativedelta(*a, **kw),
})
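# A minimal illustration (hypothetical variable names) of how this sandboxed
# environment renders mako-style placeholders:
#
#   t = mako_template_env.from_string(u"Dear ${user_name},")
#   assert t.render({'user_name': u'Alice'}) == u"Dear Alice,"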
except ImportError:
_logger.warning("jinja2 not available, templating features will not work!")
class email_template(osv.osv):
"Templates for sending email"
_name = "email.template"
_description = 'Email Templates'
_order = 'name'
def default_get(self, cr, uid, fields, context=None):
res = super(email_template, self).default_get(cr, uid, fields, context)
if res.get('model'):
res['model_id'] = self.pool['ir.model'].search(cr, uid, [('model', '=', res.pop('model'))], context=context)[0]
return res
def _replace_local_links(self, cr, uid, html, context=None):
""" Post-processing of html content to replace local links to absolute
links, using web.base.url as base url. """
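# For example (hypothetical values): with web.base.url set to
# 'http://example.com', '<a href="/web#id=3">' becomes
# '<a href="http://example.com/web#id=3">'.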
if not html:
return html
# form a tree
root = lxml.html.fromstring(html)
if not len(root) and root.text is None and root.tail is None:
html = '<div>%s</div>' % html
root = lxml.html.fromstring(html)
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
(base_scheme, base_netloc, bpath, bparams, bquery, bfragment) = urlparse.urlparse(base_url)
def _process_link(url):
new_url = url
(scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)
if not scheme and not netloc:
new_url = urlparse.urlunparse((base_scheme, base_netloc, path, params, query, fragment))
return new_url
# check all nodes, replace :
# - img src -> check URL
# - a href -> check URL
for node in root.iter():
if node.tag == 'a' and node.get('href'):
node.set('href', _process_link(node.get('href')))
elif node.tag == 'img' and not node.get('src', 'data').startswith('data'):
node.set('src', _process_link(node.get('src')))
html = lxml.html.tostring(root, pretty_print=False, method='html')
# this is ugly, but lxml/etree tostring wants to put everything in a 'div' that breaks the editor -> remove that
if html.startswith('<div>') and html.endswith('</div>'):
html = html[5:-6]
return html
def render_post_process(self, cr, uid, html, context=None):
html = self._replace_local_links(cr, uid, html, context=context)
return html
def render_template_batch(self, cr, uid, template, model, res_ids, context=None, post_process=False):
"""Render the given template text, replace mako expressions ``${expr}``
with the result of evaluating these expressions with
an evaluation context containing:
* ``user``: browse_record of the current user
* ``object``: browse_record of the document record this mail is
related to
* ``context``: the context passed to the mail composition wizard
:param str template: the template text to render
:param str model: model name of the document record this mail is related to.
:param list res_ids: ids of the document records these mails are related to.
"""
if context is None:
context = {}
res_ids = filter(None, res_ids) # to avoid browsing [None] below
results = dict.fromkeys(res_ids, u"")
# try to load the template
try:
template = mako_template_env.from_string(tools.ustr(template))
except Exception:
_logger.exception("Failed to load template %r", template)
return results
# prepare template variables
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
records = self.pool[model].browse(cr, uid, res_ids, context=context) or [None]
variables = {
'format_tz': lambda dt, tz=False, format=False, context=context: format_tz(self.pool, cr, uid, dt, tz, format, context),
'user': user,
'ctx': context, # context kw would clash with mako internals
}
for record in records:
res_id = record.id if record else None
variables['object'] = record
try:
render_result = template.render(variables)
except Exception:
_logger.exception("Failed to render template %r using values %r" % (template, variables))
render_result = u""
if render_result == u"False":
render_result = u""
results[res_id] = render_result
if post_process:
for res_id, result in results.iteritems():
results[res_id] = self.render_post_process(cr, uid, result, context=context)
return results
def get_email_template_batch(self, cr, uid, template_id=False, res_ids=None, context=None):
if context is None:
context = {}
if res_ids is None:
res_ids = [None]
results = dict.fromkeys(res_ids, False)
if not template_id:
return results
template = self.browse(cr, uid, template_id, context)
langs = self.render_template_batch(cr, uid, template.lang, template.model, res_ids, context)
for res_id, lang in langs.iteritems():
if lang:
# Use translated template if necessary
ctx = context.copy()
ctx['lang'] = lang
template = self.browse(cr, uid, template.id, ctx)
else:
template = self.browse(cr, uid, int(template_id), context)
results[res_id] = template
return results
def onchange_model_id(self, cr, uid, ids, model_id, context=None):
mod_name = False
if model_id:
mod_name = self.pool.get('ir.model').browse(cr, uid, model_id, context).model
return {'value': {'model': mod_name}}
_columns = {
'name': fields.char('Name'),
'model_id': fields.many2one('ir.model', 'Applies to', help="The kind of document with which this template can be used"),
'model': fields.related('model_id', 'model', type='char', string='Related Document Model',
select=True, store=True, readonly=True),
'lang': fields.char('Language',
help="Optional translation language (ISO code) to select when sending out an email. "
"If not set, the english version will be used. "
"This should usually be a placeholder expression "
"that provides the appropriate language, e.g. "
"${object.partner_id.lang}.",
placeholder="${object.partner_id.lang}"),
'user_signature': fields.boolean('Add Signature',
help="If checked, the user's signature will be appended to the text version "
"of the message"),
'subject': fields.char('Subject', translate=True, help="Subject (placeholders may be used here)",),
'email_from': fields.char('From',
help="Sender address (placeholders may be used here). If not set, the default "
"value will be the author's email alias if configured, or email address."),
'use_default_to': fields.boolean(
'Default recipients',
help="Default recipients of the record:\n"
"- partner (using id on a partner or the partner_id field) OR\n"
"- email (using email_from or email field)"),
'email_to': fields.char('To (Emails)', help="Comma-separated recipient addresses (placeholders may be used here)"),
'partner_to': fields.char('To (Partners)',
help="Comma-separated ids of recipient partners (placeholders may be used here)",
oldname='email_recipients'),
'email_cc': fields.char('Cc', help="Carbon copy recipients (placeholders may be used here)"),
'reply_to': fields.char('Reply-To', help="Preferred response address (placeholders may be used here)"),
'mail_server_id': fields.many2one('ir.mail_server', 'Outgoing Mail Server', readonly=False,
help="Optional preferred server for outgoing mails. If not set, the highest "
"priority one will be used."),
'body_html': fields.html('Body', translate=True, sanitize=False, help="Rich-text/HTML version of the message (placeholders may be used here)"),
'report_name': fields.char('Report Filename', translate=True,
help="Name to use for the generated report file (may contain placeholders)\n"
"The extension can be omitted and will then come from the report type."),
'report_template': fields.many2one('ir.actions.report.xml', 'Optional report to print and attach'),
'ref_ir_act_window': fields.many2one('ir.actions.act_window', 'Sidebar action', readonly=True, copy=False,
help="Sidebar action to make this template available on records "
"of the related document model"),
'ref_ir_value': fields.many2one('ir.values', 'Sidebar Button', readonly=True, copy=False,
help="Sidebar button to open the sidebar action"),
'attachment_ids': fields.many2many('ir.attachment', 'email_template_attachment_rel', 'email_template_id',
'attachment_id', 'Attachments',
help="You may attach files to this template, to be added to all "
"emails created from this template"),
'auto_delete': fields.boolean('Auto Delete', help="Permanently delete this email after sending it, to save space"),
# Fake fields used to implement the placeholder assistant
'model_object_field': fields.many2one('ir.model.fields', string="Field",
help="Select target field from the related document model.\n"
"If it is a relationship field you will be able to select "
"a target field at the destination of the relationship."),
'sub_object': fields.many2one('ir.model', 'Sub-model', readonly=True,
help="When a relationship field is selected as first field, "
"this field shows the document model the relationship goes to."),
'sub_model_object_field': fields.many2one('ir.model.fields', 'Sub-field',
help="When a relationship field is selected as first field, "
"this field lets you select the target field within the "
"destination document model (sub-model)."),
'null_value': fields.char('Default Value', help="Optional value to use if the target field is empty"),
'copyvalue': fields.char('Placeholder Expression', help="Final placeholder expression, to be copy-pasted in the desired template field."),
}
_defaults = {
'auto_delete': True,
}
def create_action(self, cr, uid, ids, context=None):
action_obj = self.pool.get('ir.actions.act_window')
data_obj = self.pool.get('ir.model.data')
for template in self.browse(cr, uid, ids, context=context):
src_obj = template.model_id.model
model_data_id = data_obj._get_id(cr, uid, 'mail', 'email_compose_message_wizard_form')
res_id = data_obj.browse(cr, uid, model_data_id, context=context).res_id
button_name = _('Send Mail (%s)') % template.name
act_id = action_obj.create(cr, SUPERUSER_ID, {
'name': button_name,
'type': 'ir.actions.act_window',
'res_model': 'mail.compose.message',
'src_model': src_obj,
'view_type': 'form',
'context': "{'default_composition_mode': 'mass_mail', 'default_template_id' : %d, 'default_use_template': True}" % (template.id),
'view_mode':'form,tree',
'view_id': res_id,
'target': 'new',
'auto_refresh':1
}, context)
ir_values_id = self.pool.get('ir.values').create(cr, SUPERUSER_ID, {
'name': button_name,
'model': src_obj,
'key2': 'client_action_multi',
'value': "ir.actions.act_window,%s" % act_id,
'object': True,
}, context)
template.write({
'ref_ir_act_window': act_id,
'ref_ir_value': ir_values_id,
})
return True
def unlink_action(self, cr, uid, ids, context=None):
for template in self.browse(cr, uid, ids, context=context):
try:
if template.ref_ir_act_window:
self.pool.get('ir.actions.act_window').unlink(cr, SUPERUSER_ID, template.ref_ir_act_window.id, context)
if template.ref_ir_value:
ir_values_obj = self.pool.get('ir.values')
ir_values_obj.unlink(cr, SUPERUSER_ID, template.ref_ir_value.id, context)
except Exception:
raise osv.except_osv(_("Warning"), _("Deletion of the action record failed."))
return True
def unlink(self, cr, uid, ids, context=None):
self.unlink_action(cr, uid, ids, context=context)
return super(email_template, self).unlink(cr, uid, ids, context=context)
def copy(self, cr, uid, id, default=None, context=None):
template = self.browse(cr, uid, id, context=context)
default = dict(default or {},
name=_("%s (copy)") % template.name)
return super(email_template, self).copy(cr, uid, id, default, context)
def build_expression(self, field_name, sub_field_name, null_value):
"""Returns a placeholder expression for use in a template field,
based on the values provided in the placeholder assistant.
:param field_name: main field name
:param sub_field_name: sub field name (M2O)
:param null_value: default value if the target value is empty
:return: final placeholder expression
"""
expression = ''
if field_name:
expression = "${object." + field_name
if sub_field_name:
expression += "." + sub_field_name
if null_value:
expression += " or '''%s'''" % null_value
expression += "}"
return expression
def onchange_sub_model_object_value_field(self, cr, uid, ids, model_object_field, sub_model_object_field=False, null_value=None, context=None):
result = {
'sub_object': False,
'copyvalue': False,
'sub_model_object_field': False,
'null_value': False
}
if model_object_field:
fields_obj = self.pool.get('ir.model.fields')
field_value = fields_obj.browse(cr, uid, model_object_field, context)
if field_value.ttype in ['many2one', 'one2many', 'many2many']:
res_ids = self.pool.get('ir.model').search(cr, uid, [('model', '=', field_value.relation)], context=context)
sub_field_value = False
if sub_model_object_field:
sub_field_value = fields_obj.browse(cr, uid, sub_model_object_field, context)
if res_ids:
result.update({
'sub_object': res_ids[0],
'copyvalue': self.build_expression(field_value.name, sub_field_value and sub_field_value.name or False, null_value or False),
'sub_model_object_field': sub_model_object_field or False,
'null_value': null_value or False
})
else:
result.update({
'copyvalue': self.build_expression(field_value.name, False, null_value or False),
'null_value': null_value or False
})
return {'value': result}
def generate_recipients_batch(self, cr, uid, results, template_id, res_ids, context=None):
"""Generates the recipients of the template. Default values can ben generated
instead of the template values if requested by template or context.
Emails (email_to, email_cc) can be transformed into partners if requested
in the context. """
if context is None:
context = {}
template = self.browse(cr, uid, template_id, context=context)
if template.use_default_to or context.get('tpl_force_default_to'):
ctx = dict(context, thread_model=template.model)
default_recipients = self.pool['mail.thread'].message_get_default_recipients(cr, uid, res_ids, context=ctx)
for res_id, recipients in default_recipients.iteritems():
results[res_id].pop('partner_to', None)
results[res_id].update(recipients)
for res_id, values in results.iteritems():
partner_ids = values.get('partner_ids', list())
if context and context.get('tpl_partners_only'):
mails = tools.email_split(values.pop('email_to', '')) + tools.email_split(values.pop('email_cc', ''))
for mail in mails:
partner_id = self.pool.get('res.partner').find_or_create(cr, uid, mail, context=context)
partner_ids.append(partner_id)
partner_to = values.pop('partner_to', '')
if partner_to:
# placeholders could generate '', 3, 2 due to some empty field values
tpl_partner_ids = [int(pid) for pid in partner_to.split(',') if pid]
partner_ids += self.pool['res.partner'].exists(cr, SUPERUSER_ID, tpl_partner_ids, context=context)
results[res_id]['partner_ids'] = partner_ids
return results
def generate_email_batch(self, cr, uid, template_id, res_ids, context=None, fields=None):
"""Generates an email from the template for given the given model based on
records given by res_ids.
:param template_id: id of the template to render.
:param res_id: id of the record to use for rendering the template (model
is taken from template definition)
:returns: a dict containing all relevant fields for creating a new
mail.mail entry, with one extra key ``attachments``, in the
format [(report_name, data)] where data is base64 encoded.
"""
if context is None:
context = {}
if fields is None:
fields = ['subject', 'body_html', 'email_from', 'email_to', 'partner_to', 'email_cc', 'reply_to']
report_xml_pool = self.pool.get('ir.actions.report.xml')
res_ids_to_templates = self.get_email_template_batch(cr, uid, template_id, res_ids, context)
# templates: res_id -> template; template -> res_ids
templates_to_res_ids = {}
for res_id, template in res_ids_to_templates.iteritems():
templates_to_res_ids.setdefault(template, []).append(res_id)
results = dict()
for template, template_res_ids in templates_to_res_ids.iteritems():
# generate fields value for all res_ids linked to the current template
ctx = context.copy()
if template.lang:
ctx['lang'] = template._context.get('lang')
for field in fields:
generated_field_values = self.render_template_batch(
cr, uid, getattr(template, field), template.model, template_res_ids,
post_process=(field == 'body_html'),
context=ctx)
for res_id, field_value in generated_field_values.iteritems():
results.setdefault(res_id, dict())[field] = field_value
# compute recipients
results = self.generate_recipients_batch(cr, uid, results, template.id, template_res_ids, context=context)
# update values for all res_ids
for res_id in template_res_ids:
values = results[res_id]
# body: add user signature, sanitize
if 'body_html' in fields and template.user_signature:
signature = self.pool.get('res.users').browse(cr, uid, uid, context).signature
if signature:
values['body_html'] = tools.append_content_to_html(values['body_html'], signature, plaintext=False)
if values.get('body_html'):
values['body'] = tools.html_sanitize(values['body_html'])
# technical settings
values.update(
mail_server_id=template.mail_server_id.id or False,
auto_delete=template.auto_delete,
model=template.model,
res_id=res_id or False,
attachment_ids=[attach.id for attach in template.attachment_ids],
)
# Add report in attachments: generate once for all template_res_ids
if template.report_template:
for res_id in template_res_ids:
attachments = []
report_name = self.render_template(cr, uid, template.report_name, template.model, res_id, context=ctx)
report = report_xml_pool.browse(cr, uid, template.report_template.id, context)
report_service = report.report_name
if report.report_type in ['qweb-html', 'qweb-pdf']:
result, format = self.pool['report'].get_pdf(cr, uid, [res_id], report_service, context=ctx), 'pdf'
else:
result, format = openerp.report.render_report(cr, uid, [res_id], report_service, {'model': template.model}, ctx)
# TODO in trunk, change return format to binary to match message_post expected format
result = base64.b64encode(result)
if not report_name:
report_name = 'report.' + report_service
ext = "." + format
if not report_name.endswith(ext):
report_name += ext
attachments.append((report_name, result))
results[res_id]['attachments'] = attachments
return results
@api.cr_uid_id_context
def send_mail(self, cr, uid, template_id, res_id, force_send=False, raise_exception=False, context=None):
"""Generates a new mail message for the given template and record,
and schedules it for delivery through the ``mail`` module's scheduler.
:param int template_id: id of the template to render
:param int res_id: id of the record to render the template with
(model is taken from the template)
:param bool force_send: if True, the generated mail.message is
immediately sent after being created, as if the scheduler
was executed for this message only.
:returns: id of the mail.message that was created
"""
if context is None:
context = {}
mail_mail = self.pool.get('mail.mail')
ir_attachment = self.pool.get('ir.attachment')
# create a mail_mail based on values, without attachments
values = self.generate_email(cr, uid, template_id, res_id, context=context)
if not values.get('email_from'):
raise osv.except_osv(_('Warning!'), _("Sender email is missing or empty after template rendering. Specify one to deliver your message"))
values['recipient_ids'] = [(4, pid) for pid in values.get('partner_ids', list())]
attachment_ids = values.pop('attachment_ids', [])
attachments = values.pop('attachments', [])
msg_id = mail_mail.create(cr, uid, values, context=context)
mail = mail_mail.browse(cr, uid, msg_id, context=context)
# manage attachments
for attachment in attachments:
attachment_data = {
'name': attachment[0],
'datas_fname': attachment[0],
'datas': attachment[1],
'res_model': 'mail.message',
'res_id': mail.mail_message_id.id,
}
context = dict(context)
context.pop('default_type', None)
attachment_ids.append(ir_attachment.create(cr, uid, attachment_data, context=context))
if attachment_ids:
values['attachment_ids'] = [(6, 0, attachment_ids)]
mail_mail.write(cr, uid, msg_id, {'attachment_ids': [(6, 0, attachment_ids)]}, context=context)
if force_send:
mail_mail.send(cr, uid, [msg_id], raise_exception=raise_exception, context=context)
return msg_id
# Compatibility method
def render_template(self, cr, uid, template, model, res_id, context=None):
return self.render_template_batch(cr, uid, template, model, [res_id], context)[res_id]
def get_email_template(self, cr, uid, template_id=False, record_id=None, context=None):
return self.get_email_template_batch(cr, uid, template_id, [record_id], context)[record_id]
def generate_email(self, cr, uid, template_id, res_id, context=None):
return self.generate_email_batch(cr, uid, template_id, [res_id], context)[res_id]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
kaushik94/boto
|
boto/mws/__init__.py
|
429
|
1101
|
# Copyright (c) 2008, Chris Moyer http://coredumped.org
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
|
mit
|
GoogleCloudPlatform/Data-Pipeline
|
app/src/pipelines/shardstage_test.py
|
1
|
3800
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ShardStage unit tests."""
import mock
import logging
from src import basetest
from src.pipelines import shardstage
class SimpleShardStage(shardstage.ShardStage):
"""A Simple stage that just keeps track of what config it was started with."""
def __init__(self, config):
super(SimpleShardStage, self).__init__(config)
self.simple_shard_stage_config = config
class ShardStageTest(basetest.TestCase):
def setUp(self):
super(ShardStageTest, self).setUp()
self.gcscompositor_mock = mock.patch(
'src.'
'pipelines.stages.gcscompositor.GcsCompositor').start()
def testNoNeedToShard(self):
configs = [
{},
{'start': 100},
{'length': 100, 'shardSize': 100},
{'length': 100, 'shardSize': 400},
]
for config in configs:
stage = SimpleShardStage(config)
self.assertEquals(([], []), stage.ShardStage(config))
def testNeedsToShard(self):
configs_and_results = [
({'length': 100, 'shardSize': 50, 'sinks': ['gs://bucket/name'],
'shardPrefix': 'shard', 'contentType': 'text/csv'},
{'shards': [
{'start': 0, 'length': 50, 'shardSize': 50,
'shardPrefix': 'shard', 'contentType': 'text/csv',
'sinks': ['gs://bucket/name/shard0']},
{'start': 50, 'length': 50, 'shardSize': 50,
'shardPrefix': 'shard', 'contentType': 'text/csv',
'sinks': ['gs://bucket/name/shard1']},
],
'compositors': [{
'sources': ['gs://bucket/name/shard0', 'gs://bucket/name/shard1'],
'contentType': 'text/csv',
'deleteSources': True,
'sinks': ['gs://bucket/name'],
}]}),
({'length': 100, 'shardSize': 99, 'sinks': ['gs://bucket/name'],
'contentType': 'text/csv'},
{'shards': [
{'start': 0, 'length': 50, 'shardSize': 50,
'contentType': 'text/csv',
'sinks': ['gs://bucket/name/0']},
{'start': 50, 'length': 50, 'shardSize': 50,
'contentType': 'text/csv',
'sinks': ['gs://bucket/name/1']},
],
'compositors': [{
'sources': ['gs://bucket/name/0', 'gs://bucket/name/1'],
'contentType': 'text/csv',
'deleteSources': True,
'sinks': ['gs://bucket/name'],
}]})
]
for config, result in configs_and_results:
with mock.patch('uuid.uuid4', side_effect=[str(x) for x in range(10)]):
stage = SimpleShardStage(config)
(shards, compositors) = stage.ShardStage(config)
self.assertEquals(len(result['shards']), len(shards))
self.assertEquals(len(result['compositors']), len(compositors))
for expected, actual in zip(result['shards'], shards):
self.assertSameStructure(expected, actual.simple_shard_stage_config)
for expected, actual in zip(result['compositors'], compositors):
gcscompositor_config = self.gcscompositor_mock.call_args[0][0]
self.assertSameStructure(expected, gcscompositor_config)
if __name__ == '__main__':
basetest.main()
|
apache-2.0
|
sbellem/django
|
django/utils/regex_helper.py
|
432
|
12673
|
"""
Functions for reversing a regular expression (used in reverse URL resolving).
Used internally by Django and not intended for external use.
This is not, and is not intended to be, a complete reg-exp decompiler. It
should be good enough for a large class of URLs, however.
"""
from __future__ import unicode_literals
from django.utils import six
from django.utils.six.moves import zip
# Mapping of an escape character to a representative of that class. So, e.g.,
# "\w" is replaced by "x" in a reverse URL. A value of None means to ignore
# this sequence. Any missing key is mapped to itself.
ESCAPE_MAPPINGS = {
"A": None,
"b": None,
"B": None,
"d": "0",
"D": "x",
"s": " ",
"S": "x",
"w": "x",
"W": "!",
"Z": None,
}
class Choice(list):
"""
Used to represent multiple possibilities at this point in a pattern string.
We use a distinguished type, rather than a list, so that the usage in the
code is clear.
"""
class Group(list):
"""
Used to represent a capturing group in the pattern string.
"""
class NonCapture(list):
"""
Used to represent a non-capturing group in the pattern string.
"""
def normalize(pattern):
"""
Given a reg-exp pattern, normalizes it to an iterable of forms that
suffice for reverse matching. This does the following:
(1) For any repeating sections, keeps the minimum number of occurrences
permitted (this means zero for optional groups).
(2) If an optional group includes parameters, include one occurrence of
that group (along with the zero occurrence case from step (1)).
(3) Select the first (essentially an arbitrary) element from any character
class. Select an arbitrary character for any unordered class (e.g. '.'
or '\w') in the pattern.
(4) Ignore comments, look-ahead and look-behind assertions, and any of the
reg-exp flags that won't change what we construct ("iLmsu"). "(?x)" is
an error, however.
(5) Raise an error on any disjunctive ('|') constructs.
Django's URLs for forward resolving are either all positional arguments or
all keyword arguments. That is assumed here, as well. Although reverse
resolving can be done using positional args when keyword args are
specified, the two cannot be mixed in the same reverse() call.
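For example, normalize(r'^articles/(?P<year>\d{4})/$') returns
[('articles/%(year)s/', ['year'])]: a single candidate form with one
named argument.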
"""
# Do a linear scan to work out the special features of this pattern. The
# idea is that we scan once here and collect all the information we need to
# make future decisions.
result = []
non_capturing_groups = []
consume_next = True
pattern_iter = next_char(iter(pattern))
num_args = 0
# A "while" loop is used here because later on we need to be able to peek
# at the next character and possibly go around without consuming another
# one at the top of the loop.
try:
ch, escaped = next(pattern_iter)
except StopIteration:
return [('', [])]
try:
while True:
if escaped:
result.append(ch)
elif ch == '.':
# Replace "any character" with an arbitrary representative.
result.append(".")
elif ch == '|':
# FIXME: One day we should do this, but not in 1.0.
raise NotImplementedError('Awaiting Implementation')
elif ch == "^":
pass
elif ch == '$':
break
elif ch == ')':
# This can only be the end of a non-capturing group, since all
# other unescaped parentheses are handled by the grouping
# section later (and the full group is handled there).
#
# We regroup everything inside the non-capturing group so that it
# can be quantified, if necessary.
start = non_capturing_groups.pop()
inner = NonCapture(result[start:])
result = result[:start] + [inner]
elif ch == '[':
# Replace ranges with the first character in the range.
ch, escaped = next(pattern_iter)
result.append(ch)
ch, escaped = next(pattern_iter)
while escaped or ch != ']':
ch, escaped = next(pattern_iter)
elif ch == '(':
# Some kind of group.
ch, escaped = next(pattern_iter)
if ch != '?' or escaped:
# A positional group
name = "_%d" % num_args
num_args += 1
result.append(Group((("%%(%s)s" % name), name)))
walk_to_end(ch, pattern_iter)
else:
ch, escaped = next(pattern_iter)
if ch in "iLmsu#!=<":
# All of these are ignorable. Walk to the end of the
# group.
walk_to_end(ch, pattern_iter)
elif ch == ':':
# Non-capturing group
non_capturing_groups.append(len(result))
elif ch != 'P':
# Anything else, other than a named group, is something
# we cannot reverse.
raise ValueError("Non-reversible reg-exp portion: '(?%s'" % ch)
else:
ch, escaped = next(pattern_iter)
if ch not in ('<', '='):
raise ValueError("Non-reversible reg-exp portion: '(?P%s'" % ch)
# We are in a named capturing group. Extract the name and
# then skip to the end.
if ch == '<':
terminal_char = '>'
# We are in a named backreference.
else:
terminal_char = ')'
name = []
ch, escaped = next(pattern_iter)
while ch != terminal_char:
name.append(ch)
ch, escaped = next(pattern_iter)
param = ''.join(name)
# Named backreferences have already consumed the
# parenthesis.
if terminal_char != ')':
result.append(Group((("%%(%s)s" % param), param)))
walk_to_end(ch, pattern_iter)
else:
result.append(Group((("%%(%s)s" % param), None)))
elif ch in "*?+{":
# Quantifiers affect the previous item in the result list.
count, ch = get_quantifier(ch, pattern_iter)
if ch:
# We had to look ahead, but it wasn't needed to compute the
# quantifier, so use this character next time around the
# main loop.
consume_next = False
if count == 0:
if contains(result[-1], Group):
# If we are quantifying a capturing group (or
# something containing such a group) and the minimum is
# zero, we must also handle the case of one occurrence
# being present. All the quantifiers (except {0,0},
# which we conveniently ignore) that have a 0 minimum
# also allow a single occurrence.
result[-1] = Choice([None, result[-1]])
else:
result.pop()
elif count > 1:
result.extend([result[-1]] * (count - 1))
else:
# Anything else is a literal.
result.append(ch)
if consume_next:
ch, escaped = next(pattern_iter)
else:
consume_next = True
except StopIteration:
pass
except NotImplementedError:
# A case of using the disjunctive form. No results for you!
return [('', [])]
return list(zip(*flatten_result(result)))
def next_char(input_iter):
"""
An iterator that yields the next character from "input_iter", respecting
escape sequences. An escaped character is replaced by a representative of
its class (e.g. \w -> "x"). If the escaped character is one that is
skipped, it is not returned (the next character is returned instead).
Yields the next character, along with a boolean indicating whether it came
from an escape sequence (True) or was a raw character (False).
"""
for ch in input_iter:
if ch != '\\':
yield ch, False
continue
ch = next(input_iter)
representative = ESCAPE_MAPPINGS.get(ch, ch)
if representative is None:
continue
yield representative, True
def walk_to_end(ch, input_iter):
"""
The iterator is currently inside a capturing group. We want to walk to the
close of this group, skipping over any nested groups and handling escaped
parentheses correctly.
"""
if ch == '(':
nesting = 1
else:
nesting = 0
for ch, escaped in input_iter:
if escaped:
continue
elif ch == '(':
nesting += 1
elif ch == ')':
if not nesting:
return
nesting -= 1
def get_quantifier(ch, input_iter):
"""
Parse a quantifier from the input, where "ch" is the first character in the
quantifier.
Returns the minimum number of occurrences permitted by the quantifier and
either None or the next character from the input_iter if the next character
is not part of the quantifier.
"""
if ch in '*?+':
try:
ch2, escaped = next(input_iter)
except StopIteration:
ch2 = None
if ch2 == '?':
ch2 = None
if ch == '+':
return 1, ch2
return 0, ch2
quant = []
while ch != '}':
ch, escaped = next(input_iter)
quant.append(ch)
quant = quant[:-1]
values = ''.join(quant).split(',')
# Consume the trailing '?', if necessary.
try:
ch, escaped = next(input_iter)
except StopIteration:
ch = None
if ch == '?':
ch = None
return int(values[0]), ch
def contains(source, inst):
"""
    Returns True if the "source" contains an instance of "inst"; False
    otherwise.
"""
if isinstance(source, inst):
return True
if isinstance(source, NonCapture):
for elt in source:
if contains(elt, inst):
return True
return False
def flatten_result(source):
"""
Turns the given source sequence into a list of reg-exp possibilities and
their arguments. Returns a list of strings and a list of argument lists.
Each of the two lists will be of the same length.
"""
if source is None:
return [''], [[]]
if isinstance(source, Group):
if source[1] is None:
params = []
else:
params = [source[1]]
return [source[0]], [params]
result = ['']
result_args = [[]]
pos = last = 0
for pos, elt in enumerate(source):
if isinstance(elt, six.string_types):
continue
piece = ''.join(source[last:pos])
if isinstance(elt, Group):
piece += elt[0]
param = elt[1]
else:
param = None
last = pos + 1
for i in range(len(result)):
result[i] += piece
if param:
result_args[i].append(param)
if isinstance(elt, (Choice, NonCapture)):
if isinstance(elt, NonCapture):
elt = [elt]
inner_result, inner_args = [], []
for item in elt:
res, args = flatten_result(item)
inner_result.extend(res)
inner_args.extend(args)
new_result = []
new_args = []
for item, args in zip(result, result_args):
for i_item, i_args in zip(inner_result, inner_args):
new_result.append(item + i_item)
new_args.append(args[:] + i_args)
result = new_result
result_args = new_args
if pos >= last:
piece = ''.join(source[last:])
for i in range(len(result)):
result[i] += piece
return result, result_args
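# --- Hedged usage sketch (added for illustration; not part of the original
# module). get_quantifier() consumes (char, escaped) pairs, so we feed it the
# tail of '{2,5}' plus a lookahead character 'x'; flatten_result() expands a
# parsed pattern (here one named Group, the two-element sequence type defined
# earlier in this module) into candidate strings and parameter-name lists.
if __name__ == '__main__':
    stream = iter([(c, False) for c in '2,5}x'])
    assert get_quantifier('{', stream) == (2, 'x')
    assert flatten_result([Group(('%(pk)s', 'pk'))]) == (['%(pk)s'], [['pk']])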
|
bsd-3-clause
|
jitendra29/servo
|
tests/wpt/web-platform-tests/tools/html5lib/html5lib/trie/datrie.py
|
785
|
1166
|
from __future__ import absolute_import, division, unicode_literals
from datrie import Trie as DATrie
from six import text_type
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
def __init__(self, data):
chars = set()
for key in data.keys():
if not isinstance(key, text_type):
raise TypeError("All keys must be strings")
for char in key:
chars.add(char)
self._data = DATrie("".join(chars))
for key, value in data.items():
self._data[key] = value
def __contains__(self, key):
return key in self._data
def __len__(self):
return len(self._data)
def __iter__(self):
raise NotImplementedError()
def __getitem__(self, key):
return self._data[key]
def keys(self, prefix=None):
return self._data.keys(prefix)
def has_keys_with_prefix(self, prefix):
return self._data.has_keys_with_prefix(prefix)
def longest_prefix(self, prefix):
return self._data.longest_prefix(prefix)
def longest_prefix_item(self, prefix):
return self._data.longest_prefix_item(prefix)
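# --- Hedged usage sketch (illustrative; not part of html5lib). Requires the
# third-party `datrie` package this wrapper is built on:
if __name__ == "__main__":
    t = Trie({u"abc": 1, u"abd": 2})
    assert u"abc" in t and len(t) == 2
    assert sorted(t.keys(u"ab")) == [u"abc", u"abd"]
    assert t.longest_prefix(u"abcdef") == u"abc"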
|
mpl-2.0
|
dimm0/tacc_stats
|
tacc_stats/pickler/tests/pickler_test.py
|
1
|
4459
|
from __future__ import print_function
import os, sys
from nose import with_setup
import cPickle as pickle
from tacc_stats.pickler import job_pickles
from tacc_stats.pickler import job_stats, batch_acct
sys.modules['pickler.job_stats'] = job_stats
sys.modules['pickler.batch_acct'] = batch_acct
path = os.path.dirname(os.path.abspath(__file__))
data_dir = os.path.join(path, 'data')
def setup_func():
a = open(os.path.join(path,"cfg.py"),"w")
a.write('archive_dir = \"' + data_dir + '\"\n'
'acct_path = \"'+ data_dir +'/tacc_jobs_completed\"\n'
'host_list_dir= \"' + data_dir + '\"\n'
'pickles_dir= \"' + path + '\"\n'
'host_name_ext= \"platform.extension\"\n'
'batch_system = \"SLURM\"\n'
'seek=0\n')
a.close()
def teardown_func():
os.remove(os.path.join(path,"cfg.py"))
try:
os.remove(os.path.join(path,"cfg.pyc"))
    except OSError: pass
os.remove(os.path.join(path,'2013-10-01',"1835740"))
os.rmdir(os.path.join(path,'2013-10-01'))
os.remove(os.path.join(path,'2014-10-31',"20"))
os.rmdir(os.path.join(path,'2014-10-31'))
@with_setup(setup_func, teardown_func)
def test():
from tacc_stats.pickler.tests import cfg
pickle_options = { 'processes' : 1,
'start' : '2013-10-01',
'end' : '2014-11-01',
'pickle_dir' : cfg.pickles_dir,
'batch_system' : cfg.batch_system,
'acct_path' : cfg.acct_path,
'archive_dir' : cfg.archive_dir,
'host_list_dir' : cfg.host_list_dir,
'host_name_ext' : cfg.host_name_ext,
'seek' : cfg.seek
}
pickler = job_pickles.JobPickles(**pickle_options)
pickler.run()
    assert os.path.isfile(os.path.join(path,'2013-10-01','1835740'))
print("Pickle file generated.")
old = pickle.load(open(os.path.join(path,'1835740_ref')))
new = pickle.load(open(os.path.join(path,'2013-10-01','1835740')))
compare_newtoold(new,old)
old = pickle.load(open(os.path.join(path,'20_ref')))
new = pickle.load(open(os.path.join(path,'2014-10-31','20')))
compare_newtoold(new,old)
@with_setup(setup_func, teardown_func)
def test_ids():
from tacc_stats.pickler.tests import cfg
pickle_options = { 'processes' : 1,
'pickle_dir' : cfg.pickles_dir,
'batch_system' : cfg.batch_system,
'acct_path' : cfg.acct_path,
'archive_dir' : cfg.archive_dir,
'host_list_dir' : cfg.host_list_dir,
'host_name_ext' : cfg.host_name_ext,
'seek' : cfg.seek
}
pickler = job_pickles.JobPickles(**pickle_options)
pickler.run(['1835740'])
pickler.run(['20'])
    assert os.path.isfile(os.path.join(path,'2013-10-01','1835740'))
print("Pickle file generated.")
old = pickle.load(open(os.path.join(path,'1835740_ref')))
new = pickle.load(open(os.path.join(path,'2013-10-01','1835740')))
compare_newtoold(new,old)
old = pickle.load(open(os.path.join(path,'20_ref')))
new = pickle.load(open(os.path.join(path,'2014-10-31','20')))
compare_newtoold(new,old)
def compare_newtoold(new,old):
assert new.id == old.id
for i in range(len(old.times)):
assert new.times[i] == old.times[i]
for i in range(len(old.hosts.keys())):
assert old.hosts.keys()[i] == new.hosts.keys()[i]
print('id, keys, and times match.')
for host_name, host in old.hosts.iteritems():
for type_name, type_stats in host.stats.iteritems():
if type_name =='ib': continue
for dev_name, dev_stats in type_stats.iteritems():
for i in range(len(dev_stats)):
for j in range(len(dev_stats[i])):
if new.hosts[host_name].stats[type_name][dev_name][i][j]-dev_stats[i][j] != 0.0:
print(new.times[i],host_name,type_name,dev_name,new.hosts[host_name].stats[type_name][dev_name][i][j],dev_stats[i][j])
assert new.hosts[host_name].stats[type_name][dev_name][i][j] == dev_stats[i][j]
print('stats match.')
|
lgpl-2.1
|
Slezhuk/ansible
|
lib/ansible/modules/network/nmcli.py
|
21
|
41915
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Chris Long <[email protected]> <[email protected]>
#
# This file is a module for Ansible that interacts with Network Manager
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION='''
---
module: nmcli
author: "Chris Long (@alcamie101)"
short_description: Manage Networking
requirements: [ nmcli, dbus ]
version_added: "2.0"
description:
    - Manage the network devices. Create, modify and manage ethernet, team, bond, vlan and other connections.
options:
state:
required: True
choices: [ present, absent ]
description:
- Whether the device should exist or not, taking action if the state is different from what is stated.
autoconnect:
required: False
default: "yes"
choices: [ "yes", "no" ]
description:
- Whether the connection should start on boot.
- Whether the connection profile can be automatically activated
conn_name:
required: True
description:
            - 'Where conn_name will be the name used to call the connection. When not provided a default name is generated: <type>[-<ifname>][-<num>]'
ifname:
required: False
default: conn_name
description:
            - Where IFNAME will be what we call the interface name.
            - The interface to bind the connection to. The connection will only be applicable to this interface name.
- A special value of "*" can be used for interface-independent connections.
- The ifname argument is mandatory for all connection types except bond, team, bridge and vlan.
type:
required: False
choices: [ ethernet, team, team-slave, bond, bond-slave, bridge, vlan ]
description:
- This is the type of device or network connection that you wish to create.
mode:
required: False
choices: [ "balance-rr", "active-backup", "balance-xor", "broadcast", "802.3ad", "balance-tlb", "balance-alb" ]
        default: balance-rr
description:
            - This is the mode used for a bond, team or bridge connection (e.g. the bonding policy).
master:
required: False
default: None
description:
            - Master (ifname, connection UUID or conn_name) of the bridge, team or bond master connection profile.
ip4:
required: False
default: None
description:
- 'The IPv4 address to this interface using this format ie: "192.0.2.24/24"'
gw4:
required: False
description:
- 'The IPv4 gateway for this interface using this format ie: "192.0.2.1"'
dns4:
required: False
default: None
description:
            - 'A list of up to 3 DNS servers, IPv4 format e.g. To add two IPv4 DNS server addresses: ["192.0.2.53", "198.51.100.53"]'
ip6:
required: False
default: None
description:
- 'The IPv6 address to this interface using this format ie: "abbe::cafe"'
gw6:
required: False
default: None
description:
- 'The IPv6 gateway for this interface using this format ie: "2001:db8::1"'
dns6:
required: False
description:
            - 'A list of up to 3 DNS servers, IPv6 format e.g. To add two IPv6 DNS server addresses: ["2001:4860:4860::8888", "2001:4860:4860::8844"]'
mtu:
required: False
default: 1500
description:
- The connection MTU, e.g. 9000. This can't be applied when creating the interface and is done once the interface has been created.
- Can be used when modifying Team, VLAN, Ethernet (Future plans to implement wifi, pppoe, infiniband)
primary:
required: False
default: None
description:
            - This is only used with bond and is the primary interface name (for "active-backup" mode); this is usually the 'ifname'.
miimon:
required: False
default: 100
description:
- This is only used with bond - miimon
downdelay:
required: False
default: None
description:
- This is only used with bond - downdelay
updelay:
required: False
default: None
description:
- This is only used with bond - updelay
arp_interval:
required: False
default: None
description:
- This is only used with bond - ARP interval
arp_ip_target:
required: False
default: None
description:
- This is only used with bond - ARP IP target
stp:
required: False
default: None
description:
- This is only used with bridge and controls whether Spanning Tree Protocol (STP) is enabled for this bridge
priority:
required: False
default: 128
description:
- This is only used with 'bridge' - sets STP priority
forwarddelay:
required: False
default: 15
description:
- This is only used with bridge - [forward-delay <2-30>] STP forwarding delay, in seconds
hellotime:
required: False
default: 2
description:
- This is only used with bridge - [hello-time <1-10>] STP hello time, in seconds
maxage:
required: False
default: 20
description:
- This is only used with bridge - [max-age <6-42>] STP maximum message age, in seconds
ageingtime:
required: False
default: 300
description:
- This is only used with bridge - [ageing-time <0-1000000>] the Ethernet MAC address aging time, in seconds
mac:
required: False
default: None
description:
- >
This is only used with bridge - MAC address of the bridge
(note: this requires a recent kernel feature, originally introduced in 3.15 upstream kernel)
slavepriority:
required: False
default: 32
description:
- This is only used with 'bridge-slave' - [<0-63>] - STP priority of this slave
path_cost:
required: False
default: 100
description:
- This is only used with 'bridge-slave' - [<1-65535>] - STP port cost for destinations via this slave
hairpin:
required: False
default: yes
description:
- This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through the slave the
frame was received on.
vlanid:
required: False
default: None
description:
- This is only used with VLAN - VLAN ID in range <0-4095>
vlandev:
required: False
default: None
description:
- This is only used with VLAN - parent device this VLAN is on, can use ifname
flags:
required: False
default: None
description:
- This is only used with VLAN - flags
ingress:
required: False
default: None
description:
- This is only used with VLAN - VLAN ingress priority mapping
egress:
required: False
default: None
description:
- This is only used with VLAN - VLAN egress priority mapping
'''
EXAMPLES='''
# These examples are using the following inventory:
#
# ## Directory layout:
#
# |_/inventory/cloud-hosts
# | /group_vars/openstack-stage.yml
# | /host_vars/controller-01.openstack.host.com
# | /host_vars/controller-02.openstack.host.com
# |_/playbook/library/nmcli.py
# | /playbook-add.yml
# | /playbook-del.yml
# ```
#
# ## inventory examples
# ### groups_vars
# ```yml
# ---
# #devops_os_define_network
# storage_gw: "192.0.2.254"
# external_gw: "198.51.100.254"
# tenant_gw: "203.0.113.254"
#
# #Team vars
# nmcli_team:
# - conn_name: tenant
# ip4: '{{ tenant_ip }}'
# gw4: '{{ tenant_gw }}'
# - conn_name: external
# ip4: '{{ external_ip }}'
# gw4: '{{ external_gw }}'
# - conn_name: storage
# ip4: '{{ storage_ip }}'
# gw4: '{{ storage_gw }}'
# nmcli_team_slave:
# - conn_name: em1
# ifname: em1
# master: tenant
# - conn_name: em2
# ifname: em2
# master: tenant
# - conn_name: p2p1
# ifname: p2p1
# master: storage
# - conn_name: p2p2
# ifname: p2p2
# master: external
#
# #bond vars
# nmcli_bond:
# - conn_name: tenant
# ip4: '{{ tenant_ip }}'
# gw4: ''
# mode: balance-rr
# - conn_name: external
# ip4: '{{ external_ip }}'
# gw4: ''
# mode: balance-rr
# - conn_name: storage
# ip4: '{{ storage_ip }}'
# gw4: '{{ storage_gw }}'
# mode: balance-rr
# nmcli_bond_slave:
# - conn_name: em1
# ifname: em1
# master: tenant
# - conn_name: em2
# ifname: em2
# master: tenant
# - conn_name: p2p1
# ifname: p2p1
# master: storage
# - conn_name: p2p2
# ifname: p2p2
# master: external
#
# #ethernet vars
# nmcli_ethernet:
# - conn_name: em1
# ifname: em1
# ip4: '{{ tenant_ip }}'
# gw4: '{{ tenant_gw }}'
# - conn_name: em2
# ifname: em2
# ip4: '{{ tenant_ip1 }}'
# gw4: '{{ tenant_gw }}'
# - conn_name: p2p1
# ifname: p2p1
# ip4: '{{ storage_ip }}'
# gw4: '{{ storage_gw }}'
# - conn_name: p2p2
# ifname: p2p2
# ip4: '{{ external_ip }}'
# gw4: '{{ external_gw }}'
# ```
#
# ### host_vars
# ```yml
# ---
# storage_ip: "192.0.2.91/23"
# external_ip: "198.51.100.23/21"
# tenant_ip: "203.0.113.77/23"
# ```
## playbook-add.yml example
---
- hosts: openstack-stage
remote_user: root
tasks:
- name: install needed network manager libs
yum:
name: '{{ item }}'
state: installed
with_items:
- NetworkManager-glib
- libnm-qt-devel.x86_64
- nm-connection-editor.x86_64
- libsemanage-python
- policycoreutils-python
##### Working with all cloud nodes - Teaming
- name: try nmcli add team - conn_name only & ip4 gw4
nmcli:
type: team
conn_name: '{{ item.conn_name }}'
ip4: '{{ item.ip4 }}'
gw4: '{{ item.gw4 }}'
state: present
with_items:
- '{{ nmcli_team }}'
- name: try nmcli add teams-slave
nmcli:
type: team-slave
conn_name: '{{ item.conn_name }}'
ifname: '{{ item.ifname }}'
master: '{{ item.master }}'
state: present
with_items:
- '{{ nmcli_team_slave }}'
###### Working with all cloud nodes - Bonding
- name: try nmcli add bond - conn_name only & ip4 gw4 mode
nmcli:
type: bond
conn_name: '{{ item.conn_name }}'
ip4: '{{ item.ip4 }}'
gw4: '{{ item.gw4 }}'
mode: '{{ item.mode }}'
state: present
with_items:
- '{{ nmcli_bond }}'
- name: try nmcli add bond-slave
nmcli:
type: bond-slave
conn_name: '{{ item.conn_name }}'
ifname: '{{ item.ifname }}'
master: '{{ item.master }}'
state: present
with_items:
- '{{ nmcli_bond_slave }}'
##### Working with all cloud nodes - Ethernet
- name: nmcli add Ethernet - conn_name only & ip4 gw4
nmcli:
type: ethernet
conn_name: '{{ item.conn_name }}'
ip4: '{{ item.ip4 }}'
gw4: '{{ item.gw4 }}'
state: present
with_items:
- '{{ nmcli_ethernet }}'
## playbook-del.yml example
- hosts: openstack-stage
remote_user: root
tasks:
- name: try nmcli del team - multiple
nmcli:
conn_name: '{{ item.conn_name }}'
state: absent
with_items:
- conn_name: em1
- conn_name: em2
- conn_name: p1p1
- conn_name: p1p2
- conn_name: p2p1
- conn_name: p2p2
- conn_name: tenant
- conn_name: storage
- conn_name: external
- conn_name: team-em1
- conn_name: team-em2
- conn_name: team-p1p1
- conn_name: team-p1p2
- conn_name: team-p2p1
- conn_name: team-p2p2
# To add an Ethernet connection with static IP configuration, issue a command as follows
- nmcli:
conn_name: my-eth1
ifname: eth1
type: ethernet
ip4: 192.0.2.100/24
gw4: 192.0.2.1
state: present
# To add a Team connection with static IP configuration, issue a command as follows
- nmcli:
conn_name: my-team1
ifname: my-team1
type: team
ip4: 192.0.2.100/24
gw4: 192.0.2.1
state: present
autoconnect: yes
# Optionally, at the same time specify IPv6 addresses for the device as follows:
- nmcli:
conn_name: my-eth1
ifname: eth1
type: ethernet
ip4: 192.0.2.100/24
gw4: 192.0.2.1
ip6: '2001:db8::cafe'
gw6: '2001:db8::1'
state: present
# To add two IPv4 DNS server addresses:
- nmcli:
conn_name: my-eth1
dns4:
- 192.0.2.53
- 198.51.100.53
state: present
# To make a profile usable for all compatible Ethernet interfaces, issue a command as follows
- nmcli:
    type: ethernet
    conn_name: my-eth1
ifname: '*'
state: present
# To change the property of a setting e.g. MTU, issue a command as follows:
- nmcli:
conn_name: my-eth1
mtu: 9000
type: ethernet
state: present
# Exit statuses:
# - nmcli exits with status 0 if it succeeds, a value greater than 0 is
# returned if an error occurs.
# - 0 Success - indicates the operation succeeded
# - 1 Unknown or unspecified error
# - 2 Invalid user input, wrong nmcli invocation
# - 3 Timeout expired (see --wait option)
# - 4 Connection activation failed
# - 5 Connection deactivation failed
# - 6 Disconnecting device failed
# - 7 Connection deletion failed
# - 8 NetworkManager is not running
# - 9 nmcli and NetworkManager versions mismatch
# - 10 Connection, device, or access point does not exist.
'''
# import ansible.module_utils.basic
import os
import sys
HAVE_DBUS=False
try:
import dbus
HAVE_DBUS=True
except ImportError:
pass
HAVE_NM_CLIENT=False
try:
from gi.repository import NetworkManager, NMClient
HAVE_NM_CLIENT=True
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
class Nmcli(object):
"""
This is the generic nmcli manipulation class that is subclassed based on platform.
A subclass may wish to override the following action methods:-
- create_connection()
- delete_connection()
- modify_connection()
- show_connection()
- up_connection()
- down_connection()
All subclasses MUST define platform and distribution (which may be None).
"""
platform='Generic'
distribution=None
if HAVE_DBUS:
bus=dbus.SystemBus()
# The following is going to be used in dbus code
DEVTYPES={1: "Ethernet",
2: "Wi-Fi",
5: "Bluetooth",
6: "OLPC",
7: "WiMAX",
8: "Modem",
9: "InfiniBand",
10: "Bond",
11: "VLAN",
12: "ADSL",
13: "Bridge",
14: "Generic",
15: "Team"
}
STATES={0: "Unknown",
10: "Unmanaged",
20: "Unavailable",
30: "Disconnected",
40: "Prepare",
50: "Config",
60: "Need Auth",
70: "IP Config",
80: "IP Check",
90: "Secondaries",
100: "Activated",
110: "Deactivating",
120: "Failed"
}
def __init__(self, module):
self.module=module
self.state=module.params['state']
self.autoconnect=module.params['autoconnect']
self.conn_name=module.params['conn_name']
self.master=module.params['master']
self.ifname=module.params['ifname']
self.type=module.params['type']
self.ip4=module.params['ip4']
self.gw4=module.params['gw4']
self.dns4=module.params['dns4']
self.ip6=module.params['ip6']
self.gw6=module.params['gw6']
self.dns6=module.params['dns6']
self.mtu=module.params['mtu']
self.stp=module.params['stp']
self.priority=module.params['priority']
self.mode=module.params['mode']
self.miimon=module.params['miimon']
self.downdelay=module.params['downdelay']
self.updelay=module.params['updelay']
self.arp_interval=module.params['arp_interval']
self.arp_ip_target=module.params['arp_ip_target']
self.slavepriority=module.params['slavepriority']
self.forwarddelay=module.params['forwarddelay']
self.hellotime=module.params['hellotime']
self.maxage=module.params['maxage']
self.ageingtime=module.params['ageingtime']
self.mac=module.params['mac']
self.vlanid=module.params['vlanid']
self.vlandev=module.params['vlandev']
self.flags=module.params['flags']
self.ingress=module.params['ingress']
self.egress=module.params['egress']
def execute_command(self, cmd, use_unsafe_shell=False, data=None):
return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data)
def merge_secrets(self, proxy, config, setting_name):
try:
# returns a dict of dicts mapping name::setting, where setting is a dict
# mapping key::value. Each member of the 'setting' dict is a secret
secrets=proxy.GetSecrets(setting_name)
# Copy the secrets into our connection config
for setting in secrets:
for key in secrets[setting]:
config[setting_name][key]=secrets[setting][key]
except Exception as e:
pass
def dict_to_string(self, d):
# Try to trivially translate a dictionary's elements into nice string
# formatting.
dstr=""
for key in d:
val=d[key]
str_val=""
add_string=True
if isinstance(val, dbus.Array):
for elt in val:
if isinstance(elt, dbus.Byte):
str_val+="%s " % int(elt)
elif isinstance(elt, dbus.String):
str_val+="%s" % elt
elif isinstance(val, dbus.Dictionary):
dstr+=self.dict_to_string(val)
add_string=False
else:
str_val=val
if add_string:
dstr+="%s: %s\n" % ( key, str_val)
return dstr
def connection_to_string(self, config):
# dump a connection configuration to use in list_connection_info
setting_list=[]
for setting_name in config:
setting_list.append(self.dict_to_string(config[setting_name]))
return setting_list
# print ""
def bool_to_string(self, boolean):
if boolean:
return "yes"
else:
return "no"
def list_connection_info(self):
# Ask the settings service for the list of connections it provides
bus=dbus.SystemBus()
service_name="org.freedesktop.NetworkManager"
proxy=bus.get_object(service_name, "/org/freedesktop/NetworkManager/Settings")
settings=dbus.Interface(proxy, "org.freedesktop.NetworkManager.Settings")
connection_paths=settings.ListConnections()
connection_list=[]
# List each connection's name, UUID, and type
for path in connection_paths:
con_proxy=bus.get_object(service_name, path)
settings_connection=dbus.Interface(con_proxy, "org.freedesktop.NetworkManager.Settings.Connection")
config=settings_connection.GetSettings()
# Now get secrets too; we grab the secrets for each type of connection
# (since there isn't a "get all secrets" call because most of the time
# you only need 'wifi' secrets or '802.1x' secrets, not everything) and
# merge that into the configuration data - To use at a later stage
self.merge_secrets(settings_connection, config, '802-11-wireless')
self.merge_secrets(settings_connection, config, '802-11-wireless-security')
self.merge_secrets(settings_connection, config, '802-1x')
self.merge_secrets(settings_connection, config, 'gsm')
self.merge_secrets(settings_connection, config, 'cdma')
self.merge_secrets(settings_connection, config, 'ppp')
# Get the details of the 'connection' setting
s_con=config['connection']
connection_list.append(s_con['id'])
connection_list.append(s_con['uuid'])
connection_list.append(s_con['type'])
connection_list.append(self.connection_to_string(config))
return connection_list
def connection_exists(self):
# we are going to use name and type in this instance to find if that connection exists and is of type x
connections=self.list_connection_info()
for con_item in connections:
if self.conn_name==con_item:
return True
def down_connection(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# if self.connection_exists():
cmd.append('con')
cmd.append('down')
cmd.append(self.conn_name)
return self.execute_command(cmd)
def up_connection(self):
cmd=[self.module.get_bin_path('nmcli', True)]
cmd.append('con')
cmd.append('up')
cmd.append(self.conn_name)
return self.execute_command(cmd)
def create_connection_team(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for creating team interface
cmd.append('con')
cmd.append('add')
cmd.append('type')
cmd.append('team')
cmd.append('con-name')
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
cmd.append('ifname')
if self.ifname is not None:
cmd.append(self.ifname)
elif self.conn_name is not None:
cmd.append(self.conn_name)
if self.ip4 is not None:
cmd.append('ip4')
cmd.append(self.ip4)
if self.gw4 is not None:
cmd.append('gw4')
cmd.append(self.gw4)
if self.ip6 is not None:
cmd.append('ip6')
cmd.append(self.ip6)
if self.gw6 is not None:
cmd.append('gw6')
cmd.append(self.gw6)
if self.autoconnect is not None:
cmd.append('autoconnect')
cmd.append(self.bool_to_string(self.autoconnect))
return cmd
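    # Hedged illustration (added comment; not part of the module): with
    # conn_name='tenant', ip4='192.0.2.10/24' and everything else left at
    # None, create_connection_team() assembles an argv list equivalent to:
    #   nmcli con add type team con-name tenant ifname tenant ip4 192.0.2.10/24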
def modify_connection_team(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for modifying team interface
cmd.append('con')
cmd.append('mod')
cmd.append(self.conn_name)
if self.ip4 is not None:
cmd.append('ipv4.address')
cmd.append(self.ip4)
if self.gw4 is not None:
cmd.append('ipv4.gateway')
cmd.append(self.gw4)
if self.dns4 is not None:
cmd.append('ipv4.dns')
cmd.append(self.dns4)
if self.ip6 is not None:
cmd.append('ipv6.address')
cmd.append(self.ip6)
if self.gw6 is not None:
cmd.append('ipv6.gateway')
cmd.append(self.gw6)
if self.dns6 is not None:
cmd.append('ipv6.dns')
cmd.append(self.dns6)
if self.autoconnect is not None:
cmd.append('autoconnect')
cmd.append(self.bool_to_string(self.autoconnect))
# Can't use MTU with team
return cmd
def create_connection_team_slave(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for creating team-slave interface
cmd.append('connection')
cmd.append('add')
cmd.append('type')
cmd.append(self.type)
cmd.append('con-name')
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
cmd.append('ifname')
if self.ifname is not None:
cmd.append(self.ifname)
elif self.conn_name is not None:
cmd.append(self.conn_name)
cmd.append('master')
if self.conn_name is not None:
cmd.append(self.master)
# if self.mtu is not None:
# cmd.append('802-3-ethernet.mtu')
# cmd.append(self.mtu)
return cmd
def modify_connection_team_slave(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for modifying team-slave interface
cmd.append('con')
cmd.append('mod')
cmd.append(self.conn_name)
cmd.append('connection.master')
cmd.append(self.master)
if self.mtu is not None:
cmd.append('802-3-ethernet.mtu')
cmd.append(self.mtu)
return cmd
def create_connection_bond(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for creating bond interface
cmd.append('con')
cmd.append('add')
cmd.append('type')
cmd.append('bond')
cmd.append('con-name')
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
cmd.append('ifname')
if self.ifname is not None:
cmd.append(self.ifname)
elif self.conn_name is not None:
cmd.append(self.conn_name)
if self.ip4 is not None:
cmd.append('ip4')
cmd.append(self.ip4)
if self.gw4 is not None:
cmd.append('gw4')
cmd.append(self.gw4)
if self.ip6 is not None:
cmd.append('ip6')
cmd.append(self.ip6)
if self.gw6 is not None:
cmd.append('gw6')
cmd.append(self.gw6)
if self.autoconnect is not None:
cmd.append('autoconnect')
cmd.append(self.bool_to_string(self.autoconnect))
if self.mode is not None:
cmd.append('mode')
cmd.append(self.mode)
if self.miimon is not None:
cmd.append('miimon')
cmd.append(self.miimon)
if self.downdelay is not None:
cmd.append('downdelay')
cmd.append(self.downdelay)
        if self.updelay is not None:
            cmd.append('updelay')
            cmd.append(self.updelay)
        if self.arp_interval is not None:
            cmd.append('arp-interval')
            cmd.append(self.arp_interval)
        if self.arp_ip_target is not None:
            cmd.append('arp-ip-target')
            cmd.append(self.arp_ip_target)
return cmd
def modify_connection_bond(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for modifying bond interface
cmd.append('con')
cmd.append('mod')
cmd.append(self.conn_name)
if self.ip4 is not None:
cmd.append('ipv4.address')
cmd.append(self.ip4)
if self.gw4 is not None:
cmd.append('ipv4.gateway')
cmd.append(self.gw4)
if self.dns4 is not None:
cmd.append('ipv4.dns')
cmd.append(self.dns4)
if self.ip6 is not None:
cmd.append('ipv6.address')
cmd.append(self.ip6)
if self.gw6 is not None:
cmd.append('ipv6.gateway')
cmd.append(self.gw6)
if self.dns6 is not None:
cmd.append('ipv6.dns')
cmd.append(self.dns6)
if self.autoconnect is not None:
cmd.append('autoconnect')
cmd.append(self.bool_to_string(self.autoconnect))
return cmd
def create_connection_bond_slave(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for creating bond-slave interface
cmd.append('connection')
cmd.append('add')
cmd.append('type')
cmd.append('bond-slave')
cmd.append('con-name')
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
cmd.append('ifname')
if self.ifname is not None:
cmd.append(self.ifname)
elif self.conn_name is not None:
cmd.append(self.conn_name)
cmd.append('master')
if self.conn_name is not None:
cmd.append(self.master)
return cmd
def modify_connection_bond_slave(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for modifying bond-slave interface
cmd.append('con')
cmd.append('mod')
cmd.append(self.conn_name)
cmd.append('connection.master')
cmd.append(self.master)
return cmd
def create_connection_ethernet(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for creating ethernet interface
# To add an Ethernet connection with static IP configuration, issue a command as follows
# - nmcli: name=add conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.0.2.100/24 gw4=192.0.2.1 state=present
# nmcli con add con-name my-eth1 ifname eth1 type ethernet ip4 192.0.2.100/24 gw4 192.0.2.1
cmd.append('con')
cmd.append('add')
cmd.append('type')
cmd.append('ethernet')
cmd.append('con-name')
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
cmd.append('ifname')
if self.ifname is not None:
cmd.append(self.ifname)
elif self.conn_name is not None:
cmd.append(self.conn_name)
if self.ip4 is not None:
cmd.append('ip4')
cmd.append(self.ip4)
if self.gw4 is not None:
cmd.append('gw4')
cmd.append(self.gw4)
if self.ip6 is not None:
cmd.append('ip6')
cmd.append(self.ip6)
if self.gw6 is not None:
cmd.append('gw6')
cmd.append(self.gw6)
if self.autoconnect is not None:
cmd.append('autoconnect')
cmd.append(self.bool_to_string(self.autoconnect))
return cmd
def modify_connection_ethernet(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for modifying ethernet interface
# To add an Ethernet connection with static IP configuration, issue a command as follows
# - nmcli: name=add conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.0.2.100/24 gw4=192.0.2.1 state=present
# nmcli con add con-name my-eth1 ifname eth1 type ethernet ip4 192.0.2.100/24 gw4 192.0.2.1
cmd.append('con')
cmd.append('mod')
cmd.append(self.conn_name)
if self.ip4 is not None:
cmd.append('ipv4.address')
cmd.append(self.ip4)
if self.gw4 is not None:
cmd.append('ipv4.gateway')
cmd.append(self.gw4)
if self.dns4 is not None:
cmd.append('ipv4.dns')
cmd.append(self.dns4)
if self.ip6 is not None:
cmd.append('ipv6.address')
cmd.append(self.ip6)
if self.gw6 is not None:
cmd.append('ipv6.gateway')
cmd.append(self.gw6)
if self.dns6 is not None:
cmd.append('ipv6.dns')
cmd.append(self.dns6)
if self.mtu is not None:
cmd.append('802-3-ethernet.mtu')
cmd.append(self.mtu)
if self.autoconnect is not None:
cmd.append('autoconnect')
cmd.append(self.bool_to_string(self.autoconnect))
return cmd
def create_connection_bridge(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for creating bridge interface
return cmd
def modify_connection_bridge(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for modifying bridge interface
return cmd
def create_connection_vlan(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for creating ethernet interface
return cmd
def modify_connection_vlan(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for modifying ethernet interface
return cmd
def create_connection(self):
cmd=[]
if self.type=='team':
# cmd=self.create_connection_team()
if (self.dns4 is not None) or (self.dns6 is not None):
cmd=self.create_connection_team()
self.execute_command(cmd)
cmd=self.modify_connection_team()
self.execute_command(cmd)
cmd=self.up_connection()
return self.execute_command(cmd)
            else:
cmd=self.create_connection_team()
return self.execute_command(cmd)
elif self.type=='team-slave':
if self.mtu is not None:
cmd=self.create_connection_team_slave()
self.execute_command(cmd)
cmd=self.modify_connection_team_slave()
self.execute_command(cmd)
# cmd=self.up_connection()
return self.execute_command(cmd)
else:
cmd=self.create_connection_team_slave()
return self.execute_command(cmd)
elif self.type=='bond':
if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None):
cmd=self.create_connection_bond()
self.execute_command(cmd)
cmd=self.modify_connection_bond()
self.execute_command(cmd)
cmd=self.up_connection()
return self.execute_command(cmd)
else:
cmd=self.create_connection_bond()
return self.execute_command(cmd)
elif self.type=='bond-slave':
cmd=self.create_connection_bond_slave()
elif self.type=='ethernet':
if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None):
cmd=self.create_connection_ethernet()
self.execute_command(cmd)
cmd=self.modify_connection_ethernet()
self.execute_command(cmd)
cmd=self.up_connection()
return self.execute_command(cmd)
else:
cmd=self.create_connection_ethernet()
return self.execute_command(cmd)
elif self.type=='bridge':
cmd=self.create_connection_bridge()
elif self.type=='vlan':
cmd=self.create_connection_vlan()
return self.execute_command(cmd)
def remove_connection(self):
# self.down_connection()
cmd=[self.module.get_bin_path('nmcli', True)]
cmd.append('con')
cmd.append('del')
cmd.append(self.conn_name)
return self.execute_command(cmd)
def modify_connection(self):
cmd=[]
if self.type=='team':
cmd=self.modify_connection_team()
elif self.type=='team-slave':
cmd=self.modify_connection_team_slave()
elif self.type=='bond':
cmd=self.modify_connection_bond()
elif self.type=='bond-slave':
cmd=self.modify_connection_bond_slave()
elif self.type=='ethernet':
cmd=self.modify_connection_ethernet()
elif self.type=='bridge':
cmd=self.modify_connection_bridge()
elif self.type=='vlan':
cmd=self.modify_connection_vlan()
return self.execute_command(cmd)
def main():
# Parsing argument file
module=AnsibleModule(
argument_spec=dict(
autoconnect=dict(required=False, default=None, type='bool'),
state=dict(required=True, choices=['present', 'absent'], type='str'),
conn_name=dict(required=True, type='str'),
master=dict(required=False, default=None, type='str'),
ifname=dict(required=False, default=None, type='str'),
type=dict(required=False, default=None, choices=['ethernet', 'team', 'team-slave', 'bond', 'bond-slave', 'bridge', 'vlan'], type='str'),
ip4=dict(required=False, default=None, type='str'),
gw4=dict(required=False, default=None, type='str'),
dns4=dict(required=False, default=None, type='str'),
ip6=dict(required=False, default=None, type='str'),
gw6=dict(required=False, default=None, type='str'),
dns6=dict(required=False, default=None, type='str'),
# Bond Specific vars
        mode=dict(required=False, default="balance-rr", type='str', choices=["balance-rr", "active-backup", "balance-xor", "broadcast", "802.3ad",
"balance-tlb", "balance-alb"]),
miimon=dict(required=False, default=None, type='str'),
downdelay=dict(required=False, default=None, type='str'),
updelay=dict(required=False, default=None, type='str'),
arp_interval=dict(required=False, default=None, type='str'),
arp_ip_target=dict(required=False, default=None, type='str'),
# general usage
mtu=dict(required=False, default=None, type='str'),
mac=dict(required=False, default=None, type='str'),
# bridge specific vars
stp=dict(required=False, default=True, type='bool'),
priority=dict(required=False, default="128", type='str'),
slavepriority=dict(required=False, default="32", type='str'),
forwarddelay=dict(required=False, default="15", type='str'),
hellotime=dict(required=False, default="2", type='str'),
maxage=dict(required=False, default="20", type='str'),
ageingtime=dict(required=False, default="300", type='str'),
# vlan specific vars
vlanid=dict(required=False, default=None, type='str'),
vlandev=dict(required=False, default=None, type='str'),
flags=dict(required=False, default=None, type='str'),
ingress=dict(required=False, default=None, type='str'),
egress=dict(required=False, default=None, type='str'),
),
supports_check_mode=True
)
if not HAVE_DBUS:
module.fail_json(msg="This module requires dbus python bindings")
if not HAVE_NM_CLIENT:
module.fail_json(msg="This module requires NetworkManager glib API")
nmcli=Nmcli(module)
rc=None
out=''
err=''
result={}
result['conn_name']=nmcli.conn_name
result['state']=nmcli.state
# check for issues
if nmcli.conn_name is None:
nmcli.module.fail_json(msg="You haven't specified a name for the connection")
# team-slave checks
if nmcli.type=='team-slave' and nmcli.master is None:
nmcli.module.fail_json(msg="You haven't specified a name for the master so we're not changing a thing")
if nmcli.type=='team-slave' and nmcli.ifname is None:
nmcli.module.fail_json(msg="You haven't specified a name for the connection")
if nmcli.state=='absent':
if nmcli.connection_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err)=nmcli.down_connection()
(rc, out, err)=nmcli.remove_connection()
if rc!=0:
module.fail_json(name =('No Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc)
elif nmcli.state=='present':
if nmcli.connection_exists():
# modify connection (note: this function is check mode aware)
# result['Connection']=('Connection %s of Type %s is not being added' % (nmcli.conn_name, nmcli.type))
result['Exists']='Connections do exist so we are modifying them'
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err)=nmcli.modify_connection()
if not nmcli.connection_exists():
result['Connection']=('Connection %s of Type %s is being added' % (nmcli.conn_name, nmcli.type))
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err)=nmcli.create_connection()
if rc is not None and rc!=0:
module.fail_json(name=nmcli.conn_name, msg=err, rc=rc)
if rc is None:
result['changed']=False
else:
result['changed']=True
if out:
result['stdout']=out
if err:
result['stderr']=err
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
noisemaster/AdamTestBot
|
future/builtins/newround.py
|
2
|
3204
|
"""
``python-future``: pure Python implementation of Python 3 round().
"""
from future.utils import PYPY, PY26, bind_method
# Use the decimal module for simplicity of implementation (and
# hopefully correctness).
from decimal import Decimal, ROUND_HALF_EVEN
def newround(number, ndigits=None):
"""
See Python 3 documentation: uses Banker's Rounding.
Delegates to the __round__ method if for some reason this exists.
If not, rounds a number to a given precision in decimal digits (default
0 digits). This returns an int when called with one argument,
otherwise the same type as the number. ndigits may be negative.
See the test_round method in future/tests/test_builtins.py for
examples.
"""
return_int = False
if ndigits is None:
return_int = True
ndigits = 0
if hasattr(number, '__round__'):
return number.__round__(ndigits)
if ndigits < 0:
raise NotImplementedError('negative ndigits not supported yet')
exponent = Decimal('10') ** (-ndigits)
if PYPY:
# Work around issue #24: round() breaks on PyPy with NumPy's types
if 'numpy' in repr(type(number)):
number = float(number)
if not PY26:
d = Decimal.from_float(number).quantize(exponent,
rounding=ROUND_HALF_EVEN)
else:
d = from_float_26(number).quantize(exponent, rounding=ROUND_HALF_EVEN)
if return_int:
return int(d)
else:
return float(d)
### From Python 2.7's decimal.py. Only needed to support Py2.6:
def from_float_26(f):
"""Converts a float to a decimal number, exactly.
Note that Decimal.from_float(0.1) is not the same as Decimal('0.1').
Since 0.1 is not exactly representable in binary floating point, the
value is stored as the nearest representable value which is
0x1.999999999999ap-4. The exact equivalent of the value in decimal
is 0.1000000000000000055511151231257827021181583404541015625.
>>> Decimal.from_float(0.1)
Decimal('0.1000000000000000055511151231257827021181583404541015625')
>>> Decimal.from_float(float('nan'))
Decimal('NaN')
>>> Decimal.from_float(float('inf'))
Decimal('Infinity')
>>> Decimal.from_float(-float('inf'))
Decimal('-Infinity')
>>> Decimal.from_float(-0.0)
Decimal('-0')
"""
import math as _math
from decimal import _dec_from_triple # only available on Py2.6 and Py2.7 (not 3.3)
if isinstance(f, (int, long)): # handle integer inputs
return Decimal(f)
if _math.isinf(f) or _math.isnan(f): # raises TypeError if not a float
return Decimal(repr(f))
if _math.copysign(1.0, f) == 1.0:
sign = 0
else:
sign = 1
n, d = abs(f).as_integer_ratio()
# int.bit_length() method doesn't exist on Py2.6:
def bit_length(d):
if d != 0:
return len(bin(abs(d))) - 2
else:
return 0
k = bit_length(d) - 1
result = _dec_from_triple(sign, str(n*5**k), -k)
return result
__all__ = ['newround']
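# --- Hedged usage sketch (illustrative; not part of python-future) ---
if __name__ == '__main__':
    # Banker's rounding sends ties to the nearest even integer:
    assert newround(2.5) == 2 and newround(3.5) == 4
    # 2.675 is stored as a binary float slightly below 2.675, so it rounds down:
    assert newround(2.675, 2) == 2.67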
|
mit
|
glwu/python-for-android
|
python-build/python-libs/xmpppy/xmpp/roster.py
|
203
|
9163
|
## roster.py
##
## Copyright (C) 2003-2005 Alexey "Snake" Nezhdanov
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
# $Id: roster.py,v 1.20 2005/07/13 13:22:52 snakeru Exp $
"""
Simple roster implementation. Can nevertheless be used for different tasks
like mass-renaming of contacts.
"""
from protocol import *
from client import PlugIn
class Roster(PlugIn):
""" Defines a plenty of methods that will allow you to manage roster.
Also automatically track presences from remote JIDs taking into
account that every JID can have multiple resources connected. Does not
currently support 'error' presences.
You can also use mapping interface for access to the internal representation of
contacts in roster.
"""
def __init__(self):
""" Init internal variables. """
PlugIn.__init__(self)
self.DBG_LINE='roster'
self._data = {}
self.set=None
self._exported_methods=[self.getRoster]
def plugin(self,owner,request=1):
""" Register presence and subscription trackers in the owner's dispatcher.
Also request roster from server if the 'request' argument is set.
Used internally."""
self._owner.RegisterHandler('iq',self.RosterIqHandler,'result',NS_ROSTER)
self._owner.RegisterHandler('iq',self.RosterIqHandler,'set',NS_ROSTER)
self._owner.RegisterHandler('presence',self.PresenceHandler)
if request: self.Request()
def Request(self,force=0):
""" Request roster from server if it were not yet requested
(or if the 'force' argument is set). """
if self.set is None: self.set=0
elif not force: return
self._owner.send(Iq('get',NS_ROSTER))
self.DEBUG('Roster requested from server','start')
def getRoster(self):
""" Requests roster from server if neccessary and returns self."""
if not self.set: self.Request()
while not self.set: self._owner.Process(10)
return self
def RosterIqHandler(self,dis,stanza):
""" Subscription tracker. Used internally for setting items state in
internal roster representation. """
for item in stanza.getTag('query').getTags('item'):
jid=item.getAttr('jid')
if item.getAttr('subscription')=='remove':
if self._data.has_key(jid): del self._data[jid]
raise NodeProcessed # a MUST
self.DEBUG('Setting roster item %s...'%jid,'ok')
if not self._data.has_key(jid): self._data[jid]={}
self._data[jid]['name']=item.getAttr('name')
self._data[jid]['ask']=item.getAttr('ask')
self._data[jid]['subscription']=item.getAttr('subscription')
self._data[jid]['groups']=[]
if not self._data[jid].has_key('resources'): self._data[jid]['resources']={}
for group in item.getTags('group'): self._data[jid]['groups'].append(group.getData())
self._data[self._owner.User+'@'+self._owner.Server]={'resources':{},'name':None,'ask':None,'subscription':None,'groups':None,}
self.set=1
raise NodeProcessed # a MUST. Otherwise you'll get back an <iq type='error'/>
def PresenceHandler(self,dis,pres):
""" Presence tracker. Used internally for setting items' resources state in
internal roster representation. """
jid=JID(pres.getFrom())
if not self._data.has_key(jid.getStripped()): self._data[jid.getStripped()]={'name':None,'ask':None,'subscription':'none','groups':['Not in roster'],'resources':{}}
item=self._data[jid.getStripped()]
typ=pres.getType()
if not typ:
self.DEBUG('Setting roster item %s for resource %s...'%(jid.getStripped(),jid.getResource()),'ok')
item['resources'][jid.getResource()]=res={'show':None,'status':None,'priority':'0','timestamp':None}
if pres.getTag('show'): res['show']=pres.getShow()
if pres.getTag('status'): res['status']=pres.getStatus()
if pres.getTag('priority'): res['priority']=pres.getPriority()
if not pres.getTimestamp(): pres.setTimestamp()
res['timestamp']=pres.getTimestamp()
elif typ=='unavailable' and item['resources'].has_key(jid.getResource()): del item['resources'][jid.getResource()]
# Need to handle type='error' also
def _getItemData(self,jid,dataname):
""" Return specific jid's representation in internal format. Used internally. """
jid=jid[:(jid+'/').find('/')]
return self._data[jid][dataname]
def _getResourceData(self,jid,dataname):
""" Return specific jid's resource representation in internal format. Used internally. """
if jid.find('/')+1:
jid,resource=jid.split('/',1)
if self._data[jid]['resources'].has_key(resource): return self._data[jid]['resources'][resource][dataname]
elif self._data[jid]['resources'].keys():
lastpri=-129
for r in self._data[jid]['resources'].keys():
if int(self._data[jid]['resources'][r]['priority'])>lastpri: resource,lastpri=r,int(self._data[jid]['resources'][r]['priority'])
return self._data[jid]['resources'][resource][dataname]
def delItem(self,jid):
""" Delete contact 'jid' from roster."""
self._owner.send(Iq('set',NS_ROSTER,payload=[Node('item',{'jid':jid,'subscription':'remove'})]))
def getAsk(self,jid):
""" Returns 'ask' value of contact 'jid'."""
return self._getItemData(jid,'ask')
def getGroups(self,jid):
""" Returns groups list that contact 'jid' belongs to."""
return self._getItemData(jid,'groups')
def getName(self,jid):
""" Returns name of contact 'jid'."""
return self._getItemData(jid,'name')
def getPriority(self,jid):
""" Returns priority of contact 'jid'. 'jid' should be a full (not bare) JID."""
return self._getResourceData(jid,'priority')
def getRawRoster(self):
""" Returns roster representation in internal format. """
return self._data
def getRawItem(self,jid):
""" Returns roster item 'jid' representation in internal format. """
return self._data[jid[:(jid+'/').find('/')]]
def getShow(self, jid):
""" Returns 'show' value of contact 'jid'. 'jid' should be a full (not bare) JID."""
return self._getResourceData(jid,'show')
def getStatus(self, jid):
""" Returns 'status' value of contact 'jid'. 'jid' should be a full (not bare) JID."""
return self._getResourceData(jid,'status')
def getSubscription(self,jid):
""" Returns 'subscription' value of contact 'jid'."""
return self._getItemData(jid,'subscription')
def getResources(self,jid):
""" Returns list of connected resources of contact 'jid'."""
return self._data[jid[:(jid+'/').find('/')]]['resources'].keys()
def setItem(self,jid,name=None,groups=[]):
""" Creates/renames contact 'jid' and sets the groups list that it now belongs to."""
iq=Iq('set',NS_ROSTER)
query=iq.getTag('query')
attrs={'jid':jid}
if name: attrs['name']=name
item=query.setTag('item',attrs)
for group in groups: item.addChild(node=Node('group',payload=[group]))
self._owner.send(iq)
def getItems(self):
""" Return list of all [bare] JIDs that the roster is currently tracks."""
return self._data.keys()
def keys(self):
""" Same as getItems. Provided for the sake of dictionary interface."""
return self._data.keys()
def __getitem__(self,item):
""" Get the contact in the internal format. Raises KeyError if JID 'item' is not in roster."""
return self._data[item]
def getItem(self,item):
""" Get the contact in the internal format (or None if JID 'item' is not in roster)."""
if self._data.has_key(item): return self._data[item]
def Subscribe(self,jid):
""" Send subscription request to JID 'jid'."""
self._owner.send(Presence(jid,'subscribe'))
def Unsubscribe(self,jid):
""" Ask for removing our subscription for JID 'jid'."""
self._owner.send(Presence(jid,'unsubscribe'))
def Authorize(self,jid):
""" Authorise JID 'jid'. Works only if these JID requested auth previously. """
self._owner.send(Presence(jid,'subscribed'))
def Unauthorize(self,jid):
""" Unauthorise JID 'jid'. Use for declining authorisation request
or for removing existing authorization. """
self._owner.send(Presence(jid,'unsubscribed'))
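# --- Hedged usage sketch (comments only; not part of xmpppy). With a connected
# and authorised xmpp.Client instance `cl`, the plugin is typically used as:
#
#   roster = cl.getRoster()          # requests the roster and waits for it
#   for jid in roster.getItems():
#       print jid, roster.getName(jid), roster.getSubscription(jid)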
|
apache-2.0
|
Astrophilic/Algorithms_Example
|
Bitap Algorithm/Python/BiTap.py
|
9
|
6159
|
# -*- coding: utf-8 -*-
import sys
"""Auxiliary procedure for printing each item of row in columns in binary form
"""
def _printTable(t, size):
out = ""
for i in range(len(t)):
binaryForm = bin(t[i])
binaryForm = binaryForm[2 : ]
binaryForm = binaryForm.zfill(size)
out += binaryForm + ", "
out = out[ : -2]
print out
"""Bitap (Shift-Or) fuzzy searching algorithm with Wu-Manber modifications.
http://habrahabr.ru/post/114997/
http://habrahabr.ru/post/132128/
http://ru.wikipedia.org/wiki/Двоичный_алгоритм_поиска_подстроки
Searches needle (pattern) in haystack (real word from text) with maximum alterations = maxErrors.
If maxErrors equals 0, executes precise searching only.
Returns the approximate place of needle in haystack and the number of alterations.
If needle can't be found within maxErrors alterations, returns a tuple of an empty string and -1.
"""
def bitapSearch(haystack, needle, maxErrors):
haystackLen = len(haystack)
needleLen = len(needle)
"""Genarating mask for each letter in haystack.
This mask shows presence letter in needle.
"""
def _generateAlphabet(needle, haystack):
alphabet = {}
for letter in haystack:
if letter not in alphabet:
letterPositionInNeedle = 0
for symbol in needle:
letterPositionInNeedle = letterPositionInNeedle << 1
letterPositionInNeedle |= int(letter != symbol)
alphabet[letter] = letterPositionInNeedle
return alphabet
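    # Hedged illustration (added comment; not part of the original script):
    # for needle "abc", bit i of a letter's mask is 0 iff the letter equals
    # needle[i], with needle[0] occupying the most significant of the three
    # bits: 'a' -> 0b011, 'b' -> 0b101, 'c' -> 0b110, any other letter -> 0b111.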
alphabet = _generateAlphabet(needle, haystack)
table = [] # first index - over k (errors count, numeration starts from 1), second - over columns (letters of haystack)
emptyColumn = (2 << (needleLen - 1)) - 1
# Generate underground level of table
underground = []
[underground.append(emptyColumn) for i in range(haystackLen + 1)]
table.append(underground)
_printTable(table[0], needleLen)
# Execute precise matching
k = 1
table.append([emptyColumn])
for columnNum in range(1, haystackLen + 1):
prevColumn = (table[k][columnNum - 1]) >> 1
letterPattern = alphabet[haystack[columnNum - 1]]
curColumn = prevColumn | letterPattern
table[k].append(curColumn)
if (curColumn & 0x1) == 0:
place = haystack[columnNum - needleLen : columnNum]
return (place, k - 1)
_printTable(table[k], needleLen)
# Execute fuzzy searching with calculation Levenshtein distance
for k in range(2, maxErrors + 2):
print "Errors =", k - 1
table.append([emptyColumn])
for columnNum in range(1, haystackLen + 1):
prevColumn = (table[k][columnNum - 1]) >> 1
letterPattern = alphabet[haystack[columnNum - 1]]
curColumn = prevColumn | letterPattern
insertColumn = curColumn & (table[k - 1][columnNum - 1])
deleteColumn = curColumn & (table[k - 1][columnNum] >> 1)
replaceColumn = curColumn & (table[k - 1][columnNum - 1] >> 1)
resColumn = insertColumn & deleteColumn & replaceColumn
table[k].append(resColumn)
if (resColumn & 0x1) == 0:
startPos = max(0, columnNum - needleLen - 1) # taking in account Replace operation
endPos = min(columnNum + 1, haystackLen) # taking in account Replace operation
place = haystack[startPos : endPos]
return (place, k - 1)
_printTable(table[k], needleLen)
return ("", -1)
"""Highlight letters in fullWord, which concur with letters in pattern with same order.
wordPart - it's a part of fullWord, where matching with pattern letters will execute.
"""
class bitapHighlighter():
def __init__(self, fullWord, wordPart, pattern):
self._fullWord = fullWord
self._wordPart = wordPart
self._pattern = pattern
self._largestSequence = ""
"""Finding longest sequence of letters in word. Letters must have same order, as in pattern
"""
def _nextSequence(self, fromPatternPos, fromWordPos, prevSequence):
for patternPos in range(fromPatternPos, len(self._pattern)):
char = self._pattern[patternPos]
for wordPos in range(fromWordPos, len(self._wordPart)):
if char == self._wordPart[wordPos]:
sequence = prevSequence + char
self._nextSequence(patternPos + 1, wordPos + 1, sequence)
if len(self._largestSequence) < len(prevSequence):
self._largestSequence = prevSequence
"""Divide fullWord on parts: head, place(wordPart) and tail.
Select each letter of wordPart, which present in _largestSequence with <b></b> tags
Return gathered parts in one highlighted full word
"""
def _gatherFullWord(self):
placePos = self._fullWord.find(self._wordPart)
head = self._fullWord[0 : placePos]
tail = self._fullWord[placePos + len(self._wordPart) : ]
highlightedPlace = ""
for symbol in self._wordPart:
if symbol == self._largestSequence[0 : 1]:
highlightedPlace += "<b>" + symbol + "</b>"
self._largestSequence = self._largestSequence[1 : ]
else:
highlightedPlace += symbol
return head + highlightedPlace + tail
"""Run highlighting and return highlited word.
"""
def getHighlightedWord(self):
self._nextSequence(0, 0, "")
return self._gatherFullWord()
haystack = sys.argv[1]
needle = sys.argv[2]
errorsCount = sys.argv[3]
print "haystack = " + haystack + ". needle = " + needle + ". errorsCount = " + errorsCount
# Display letters of haystack in columns
out = ""
out = out.ljust(len(needle) + 2)
for i in range(len(haystack)):
out += haystack[i].ljust(len(needle)) + " "
out = out[ : -2]
print out
# Start bitap searching
(needlePlace, errors) = bitapSearch(haystack, needle, int(errorsCount))
print "Result of Bitap searching:", needlePlace, errors
print bitapHighlighter(haystack, needlePlace, needle).getHighlightedWord()
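# Hedged invocation example (comment only; argv order taken from the reads
# above): running
#   python BiTap.py hhallo hello 1
# searches "hello" in "hhallo" allowing one alteration and prints the
# intermediate bit tables plus the found place.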
|
apache-2.0
|
cyberark-bizdev/ansible
|
lib/ansible/modules/clustering/k8s/k8s_raw.py
|
2
|
4093
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Chris Houseknecht <@chouseknecht>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: k8s_raw
short_description: Manage Kubernetes (K8s) objects
version_added: "2.5"
author: "Chris Houseknecht (@chouseknecht)"
description:
- Use the OpenShift Python client to perform CRUD operations on K8s objects.
- Pass the object definition from a source file or inline. See examples for reading
files and using Jinja templates.
- Access to the full range of K8s APIs.
- Authenticate using either a config file, certificates, password or token.
- Supports check mode.
extends_documentation_fragment:
- k8s_state_options
- k8s_name_options
- k8s_resource_options
- k8s_auth_options
requirements:
- "python >= 2.7"
- "openshift == 0.4.1"
- "PyYAML >= 3.11"
'''
EXAMPLES = '''
- name: Create a k8s namespace
k8s_raw:
name: testing
api_version: v1
kind: Namespace
state: present
- name: Create a Service object from an inline definition
k8s_raw:
state: present
definition:
apiVersion: v1
kind: Service
metadata:
name: web
namespace: testing
labels:
app: galaxy
service: web
spec:
selector:
app: galaxy
service: web
ports:
- protocol: TCP
targetPort: 8000
name: port-8000-tcp
port: 8000
- name: Create a Service object by reading the definition from a file
k8s_raw:
state: present
src: /testing/service.yml
- name: Get an existing Service object
k8s_raw:
api_version: v1
kind: Service
name: web
namespace: testing
register: web_service
- name: Get a list of all service objects
k8s_raw:
api_version: v1
kind: ServiceList
namespace: testing
register: service_list
- name: Remove an existing Service object
k8s_raw:
state: absent
api_version: v1
kind: Service
namespace: testing
name: web
# Passing the object definition from a file
- name: Create a Deployment by reading the definition from a local file
k8s_raw:
state: present
src: /testing/deployment.yml
- name: Read definition file from the Ansible controller file system
k8s_raw:
state: present
definition: "{{ lookup('file', '/testing/deployment.yml') | from_yaml }}"
- name: Read definition file from the Ansible controller file system after Jinja templating
k8s_raw:
state: present
definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}"
'''
RETURN = '''
result:
description:
- The created, patched, or otherwise present object. Will be empty in the case of a deletion.
returned: success
type: complex
contains:
api_version:
description: The versioned schema of this representation of an object.
returned: success
type: str
kind:
description: Represents the REST resource this object represents.
returned: success
type: str
metadata:
description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
returned: success
type: complex
spec:
description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
returned: success
type: complex
status:
description: Current status details for the object.
returned: success
type: complex
items:
description: Returned only when the I(kind) is a List type resource. Contains a set of objects.
returned: when resource is a List
type: list
'''
from ansible.module_utils.k8s.raw import KubernetesRawModule
def main():
KubernetesRawModule().execute_module()
if __name__ == '__main__':
main()
|
gpl-3.0
|
LookThisCode/DeveloperBus
|
Season 2013/Brazil/Projects/QueroMe-master/openid/test/discoverdata.py
|
87
|
4109
|
"""Module to make discovery data test cases available"""
import urlparse
import os.path
from openid.yadis.discover import DiscoveryResult, DiscoveryFailure
from openid.yadis.constants import YADIS_HEADER_NAME
tests_dir = os.path.dirname(__file__)
data_path = os.path.join(tests_dir, 'data')
testlist = [
# success, input_name, id_name, result_name
(True, "equiv", "equiv", "xrds"),
(True, "header", "header", "xrds"),
(True, "lowercase_header", "lowercase_header", "xrds"),
(True, "xrds", "xrds", "xrds"),
(True, "xrds_ctparam", "xrds_ctparam", "xrds_ctparam"),
(True, "xrds_ctcase", "xrds_ctcase", "xrds_ctcase"),
(False, "xrds_html", "xrds_html", "xrds_html"),
(True, "redir_equiv", "equiv", "xrds"),
(True, "redir_header", "header", "xrds"),
(True, "redir_xrds", "xrds", "xrds"),
(False, "redir_xrds_html", "xrds_html", "xrds_html"),
(True, "redir_redir_equiv", "equiv", "xrds"),
(False, "404_server_response", None, None),
(False, "404_with_header", None, None),
(False, "404_with_meta", None, None),
(False, "201_server_response", None, None),
(False, "500_server_response", None, None),
]
def getDataName(*components):
sanitized = []
for part in components:
if part in ['.', '..']:
raise ValueError
elif part:
sanitized.append(part)
if not sanitized:
raise ValueError
return os.path.join(data_path, *sanitized)
def getExampleXRDS():
filename = getDataName('example-xrds.xml')
return file(filename).read()
example_xrds = getExampleXRDS()
default_test_file = getDataName('test1-discover.txt')
discover_tests = {}
def readTests(filename):
data = file(filename).read()
tests = {}
for case in data.split('\f\n'):
(name, content) = case.split('\n', 1)
tests[name] = content
return tests
def getData(filename, name):
global discover_tests
try:
file_tests = discover_tests[filename]
except KeyError:
file_tests = discover_tests[filename] = readTests(filename)
return file_tests[name]
def fillTemplate(test_name, template, base_url, example_xrds):
mapping = [
('URL_BASE/', base_url),
('<XRDS Content>', example_xrds),
('YADIS_HEADER', YADIS_HEADER_NAME),
('NAME', test_name),
]
for k, v in mapping:
template = template.replace(k, v)
return template
def generateSample(test_name, base_url,
example_xrds=example_xrds,
filename=default_test_file):
try:
template = getData(filename, test_name)
except IOError, why:
import errno
if why[0] == errno.ENOENT:
raise KeyError(filename)
else:
raise
return fillTemplate(test_name, template, base_url, example_xrds)
def generateResult(base_url, input_name, id_name, result_name, success):
input_url = urlparse.urljoin(base_url, input_name)
# If the name is None then we expect the protocol to fail, which
# we represent by None
if id_name is None:
assert result_name is None
return input_url, DiscoveryFailure
result = generateSample(result_name, base_url)
headers, content = result.split('\n\n', 1)
header_lines = headers.split('\n')
for header_line in header_lines:
if header_line.startswith('Content-Type:'):
_, ctype = header_line.split(':', 1)
ctype = ctype.strip()
break
else:
ctype = None
id_url = urlparse.urljoin(base_url, id_name)
result = DiscoveryResult(input_url)
result.normalized_uri = id_url
if success:
result.xrds_uri = urlparse.urljoin(base_url, result_name)
result.content_type = ctype
result.response_text = content
return input_url, result
|
apache-2.0
|
urisimchoni/samba
|
python/samba/tests/dcerpc/string.py
|
9
|
4393
|
# Unix SMB/CIFS implementation.
# Copyright (C) Andrew Bartlett <[email protected]> 2016
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Tests for string and unicode handling in PIDL generated bindings
samba.dcerpc.*"""
from samba.dcerpc import drsblobs
import samba.tests
from samba.ndr import ndr_unpack, ndr_pack
import talloc
import gc
class TestException(Exception):
pass
class StringTests(samba.tests.TestCase):
def setUp(self):
super(StringTests, self).setUp()
talloc.enable_null_tracking()
self.startup_blocks = talloc.total_blocks()
def tearDown(self):
super(StringTests, self).tearDown()
gc.collect()
if talloc.total_blocks() != self.startup_blocks:
talloc.report_full()
self.fail("it appears we are leaking memory")
def test_string_from_python(self):
info = drsblobs.repsFromTo2OtherInfo()
info.dns_name1 = "hello.example.com"
info.dns_name2 = "goodbye.example.com"
gc.collect()
self.assertIsNotNone(info)
self.assertEqual(info.dns_name1, "hello.example.com")
self.assertEqual(info.dns_name2, "goodbye.example.com")
info.dns_name1 = ""
info.dns_name2 = "goodbye.example.com"
self.assertEqual(info.dns_name1, "")
self.assertEqual(info.dns_name2, "goodbye.example.com")
info.dns_name2 = None
self.assertEqual(info.dns_name1, "")
self.assertIsNone(info.dns_name2)
def test_string_with_exception(self):
try:
self.test_string_from_python()
raise TestException()
except TestException:
pass
def test_string_from_python_function(self):
def get_info():
info = drsblobs.repsFromTo2OtherInfo()
info.dns_name1 = "1.example.com"
info.dns_name2 = "2.example.com"
return info
info = get_info()
gc.collect()
self.assertIsNotNone(info)
self.assertEqual(info.dns_name1, "1.example.com")
self.assertEqual(info.dns_name2, "2.example.com")
def test_string_modify_in_place(self):
info = drsblobs.repsFromTo2OtherInfo()
info.dns_name1 = "1.example.com"
info.dns_name2 = "%s.example.com"
gc.collect()
self.assertIsNotNone(info)
self.assertEqual(info.dns_name1, "1.example.com")
self.assertEqual(info.dns_name2, "%s.example.com")
info.dns_name1 += ".co.nz"
info.dns_name2 %= 2
self.assertEqual(info.dns_name1, "1.example.com.co.nz")
self.assertEqual(info.dns_name2, "2.example.com")
del info
def test_string_delete(self):
gc.collect()
info = drsblobs.repsFromTo2OtherInfo()
info.dns_name1 = "1.example.com"
info.dns_name2 = "2.example.com"
info.dns_name1 = None
try:
del info.dns_name2
except AttributeError:
pass
self.assertIsNotNone(info)
self.assertIsNone(info.dns_name1)
self.assertIsNotNone(info.dns_name2)
class StringTestsWithoutLeakCheck(samba.tests.TestCase):
"""We know that the ndr unpacking test leaves stuff in the
autofree_context, and we don't want to worry about that. So for
    this test we don't make memory leak assertions."""
def test_string_from_ndr(self):
info = drsblobs.repsFromTo2OtherInfo()
info.dns_name1 = "1.example.com"
info.dns_name2 = "2.example.com"
packed = ndr_pack(info)
gc.collect()
info_unpacked = ndr_unpack(drsblobs.repsFromTo2OtherInfo, packed)
self.assertIsNotNone(info_unpacked)
self.assertEqual(info_unpacked.dns_name1, "1.example.com")
self.assertEqual(info_unpacked.dns_name2, "2.example.com")
|
gpl-3.0
|
brandsoulmates/incubator-airflow
|
airflow/utils/operator_helpers.py
|
30
|
1546
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def context_to_airflow_vars(context):
"""
Given a context, this function provides a dictionary of values that can be used to
externally reconstruct relations between dags, dag_runs, tasks and task_instances.
:param context: The context for the task_instance of interest
:type context: dict
"""
params = dict()
dag = context.get('dag')
if dag and dag.dag_id:
params['airflow.ctx.dag.dag_id'] = dag.dag_id
dag_run = context.get('dag_run')
if dag_run and dag_run.execution_date:
params['airflow.ctx.dag_run.execution_date'] = dag_run.execution_date.isoformat()
task = context.get('task')
if task and task.task_id:
params['airflow.ctx.task.task_id'] = task.task_id
task_instance = context.get('task_instance')
if task_instance and task_instance.execution_date:
params['airflow.ctx.task_instance.execution_date'] = \
task_instance.execution_date.isoformat()
return params
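# A minimal usage sketch (the stand-in class below is hypothetical; in
# practice the context dict is assembled by Airflow when a task runs):
#
#   class _Dag(object):
#       dag_id = 'example_dag'
#
#   context_to_airflow_vars({'dag': _Dag()})
#   # -> {'airflow.ctx.dag.dag_id': 'example_dag'}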
|
apache-2.0
|
demarle/VTK
|
Examples/Modelling/Python/SpherePuzzle.py
|
14
|
3824
|
#!/usr/bin/env python
# A game with VTK and Tkinter. :)
import Tkinter
import vtk
from vtk.tk.vtkTkRenderWindowInteractor import vtkTkRenderWindowInteractor
# Create the pipeline
puzzle = vtk.vtkSpherePuzzle()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(puzzle.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
arrows = vtk.vtkSpherePuzzleArrows()
mapper2 = vtk.vtkPolyDataMapper()
mapper2.SetInputConnection(arrows.GetOutputPort())
actor2 = vtk.vtkActor()
actor2.SetMapper(mapper2)
renWin = vtk.vtkRenderWindow()
ren = vtk.vtkRenderer()
renWin.AddRenderer(ren)
# Add the actors to the renderer, set the background and size
ren.AddActor(actor)
ren.AddActor(actor2)
ren.SetBackground(0.1, 0.2, 0.4)
ren.ResetCamera()
cam = ren.GetActiveCamera()
cam.Elevation(-40)
## Generate the GUI
root = Tkinter.Tk()
root.withdraw()
# Define a quit method that exits cleanly.
def quit(obj=root):
obj.quit()
# Create the toplevel window
top = Tkinter.Toplevel(root)
top.title("Sphere Puzzle")
top.protocol("WM_DELETE_WINDOW", quit)
# Create some frames
f1 = Tkinter.Frame(top)
f2 = Tkinter.Frame(top)
f1.pack(side="top", anchor="n", expand=1, fill="both")
f2.pack(side="bottom", anchor="s", expand="t", fill="x")
# Create the Tk render widget, and bind the events
rw = vtkTkRenderWindowInteractor(f1, width=400, height=400, rw=renWin)
rw.pack(expand="t", fill="both")
def reset(evt=None):
puzzle.Reset()
renWin.Render()
# Display some information
l1 = Tkinter.Label(f2, text="Position cursor over the rotation plane.")
l2 = Tkinter.Label(f2, text="Moving pieces will be highlighted.")
l3 = Tkinter.Label(f2, text="Press 'm' to make a move.")
reset = Tkinter.Button(f2, text="Reset", command=reset)
b1 = Tkinter.Button(f2, text="Quit", command=quit)
for i in (l1, l2, l3, reset, b1):
i.pack(side="top", expand="t", fill="x")
# Done with the GUI. Create callback functions.
in_piece_rotation = 0
LastVal = None
# Highlight pieces
def MotionCallback(obj, event):
global in_piece_rotation
global LastVal
if in_piece_rotation:
return
iren = renWin.GetInteractor()
istyle = iren.GetInteractorStyle().GetCurrentStyle()
# Return if the user is performing interaction
if istyle.GetState():
return
# Get mouse position
pos = iren.GetEventPosition()
x, y = pos
# Get world point
ren.SetDisplayPoint(x, y, ren.GetZ(x, y))
ren.DisplayToWorld()
pt = ren.GetWorldPoint()
val = puzzle.SetPoint(pt[0], pt[1], pt[2])
if (not LastVal) or val != LastVal:
renWin.Render()
LastVal = val
# Rotate the puzzle
def CharCallback(obj, event):
iren = renWin.GetInteractor()
keycode = iren.GetKeyCode()
if keycode != "m" and keycode != "M":
return
pos = iren.GetEventPosition()
ButtonCallback(pos[0], pos[1])
def ButtonCallback(x, y):
global in_piece_rotation
if in_piece_rotation:
return
in_piece_rotation = 1
# Get world point
ren.SetDisplayPoint(x, y, ren.GetZ(x,y))
ren.DisplayToWorld()
pt = ren.GetWorldPoint()
x, y, z = pt[:3]
for i in range(0, 101, 10):
puzzle.SetPoint(x, y, z)
puzzle.MovePoint(i)
renWin.Render()
root.update()
in_piece_rotation = 0
root.update()
# Modify some bindings, use the interactor style 'switch'
iren = renWin.GetInteractor()
istyle = vtk.vtkInteractorStyleSwitch()
iren.SetInteractorStyle(istyle)
istyle.SetCurrentStyleToTrackballCamera()
iren.AddObserver("MouseMoveEvent", MotionCallback)
iren.AddObserver("CharEvent", CharCallback)
# Shuffle the puzzle
ButtonCallback(218, 195)
ButtonCallback(261, 128)
ButtonCallback(213, 107)
ButtonCallback(203, 162)
ButtonCallback(134, 186)
iren.Initialize()
renWin.Render()
iren.Start()
root.mainloop()
|
bsd-3-clause
|
koomik/CouchPotatoServer
|
libs/caper/__init__.py
|
81
|
5426
|
# Copyright 2013 Dean Gardiner <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logr import Logr
from caper.matcher import FragmentMatcher
from caper.objects import CaperFragment, CaperClosure
from caper.parsers.anime import AnimeParser
from caper.parsers.scene import SceneParser
from caper.parsers.usenet import UsenetParser
__version_info__ = ('0', '3', '1')
__version_branch__ = 'master'
__version__ = "%s%s" % (
'.'.join(__version_info__),
'-' + __version_branch__ if __version_branch__ else ''
)
CL_START_CHARS = ['(', '[', '<', '>']
CL_END_CHARS = [')', ']', '<', '>']
CL_END_STRINGS = [' - ']
STRIP_START_CHARS = ''.join(CL_START_CHARS)
STRIP_END_CHARS = ''.join(CL_END_CHARS)
STRIP_CHARS = ''.join(['_', ' ', '.'])
FRAGMENT_SEPARATORS = ['.', '-', '_', ' ']
CL_START = 0
CL_END = 1
class Caper(object):
def __init__(self, debug=False):
self.debug = debug
self.parsers = {
'anime': AnimeParser,
'scene': SceneParser,
'usenet': UsenetParser
}
def _closure_split(self, name):
"""
:type name: str
:rtype: list of CaperClosure
"""
closures = []
def end_closure(closures, buf):
buf = buf.strip(STRIP_CHARS)
if len(buf) < 2:
return
cur = CaperClosure(len(closures), buf)
cur.left = closures[len(closures) - 1] if len(closures) > 0 else None
if cur.left:
cur.left.right = cur
closures.append(cur)
state = CL_START
buf = ""
for x, ch in enumerate(name):
# Check for start characters
if state == CL_START and ch in CL_START_CHARS:
end_closure(closures, buf)
state = CL_END
buf = ""
buf += ch
if state == CL_END and ch in CL_END_CHARS:
# End character found, create the closure
end_closure(closures, buf)
state = CL_START
buf = ""
elif state == CL_START and buf[-3:] in CL_END_STRINGS:
# End string found, create the closure
end_closure(closures, buf[:-3])
state = CL_START
buf = ""
end_closure(closures, buf)
return closures
def _clean_closure(self, closure):
"""
:type closure: str
:rtype: str
"""
return closure.lstrip(STRIP_START_CHARS).rstrip(STRIP_END_CHARS)
def _fragment_split(self, closures):
"""
:type closures: list of CaperClosure
:rtype: list of CaperClosure
"""
cur_position = 0
cur = None
def end_fragment(fragments, cur, cur_position):
cur.position = cur_position
cur.left = fragments[len(fragments) - 1] if len(fragments) > 0 else None
if cur.left:
cur.left_sep = cur.left.right_sep
cur.left.right = cur
cur.right_sep = ch
fragments.append(cur)
for closure in closures:
closure.fragments = []
separator_buffer = ""
for x, ch in enumerate(self._clean_closure(closure.value)):
if not cur:
cur = CaperFragment(closure)
if ch in FRAGMENT_SEPARATORS:
if cur.value:
separator_buffer = ""
separator_buffer += ch
if cur.value or not closure.fragments:
end_fragment(closure.fragments, cur, cur_position)
elif len(separator_buffer) > 1:
cur.value = separator_buffer.strip()
if cur.value:
end_fragment(closure.fragments, cur, cur_position)
separator_buffer = ""
# Reset
cur = None
cur_position += 1
else:
cur.value += ch
# Finish parsing the last fragment
if cur and cur.value:
end_fragment(closure.fragments, cur, cur_position)
# Reset
cur_position = 0
cur = None
return closures
def parse(self, name, parser='scene'):
closures = self._closure_split(name)
closures = self._fragment_split(closures)
# Print closures
for closure in closures:
Logr.debug("closure [%s]", closure.value)
for fragment in closure.fragments:
Logr.debug("\tfragment [%s]", fragment.value)
if parser not in self.parsers:
raise ValueError("Unknown parser")
# TODO autodetect the parser type
return self.parsers[parser](self.debug).run(closures)
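# A usage sketch (the release name below is made up; 'scene' is the default
# parser and the return value is whatever the chosen parser's run() yields):
#
#   caper = Caper()
#   result = caper.parse('Show.Name.S01E05.720p.HDTV.x264-GROUP')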
|
gpl-3.0
|
TEAM-Gummy/platform_external_chromium_org
|
tools/telemetry/telemetry/core/backends/chrome/inspector_runtime_unittest.py
|
25
|
1042
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core import exceptions
from telemetry.unittest import tab_test_case
class InspectorRuntimeTest(tab_test_case.TabTestCase):
def testRuntimeEvaluateSimple(self):
res = self._tab.EvaluateJavaScript('1+1')
assert res == 2
def testRuntimeEvaluateThatFails(self):
self.assertRaises(exceptions.EvaluateException,
lambda: self._tab.EvaluateJavaScript('fsdfsdfsf'))
def testRuntimeEvaluateOfSomethingThatCantJSONize(self):
def test():
self._tab.EvaluateJavaScript("""
var cur = {};
var root = {next: cur};
for (var i = 0; i < 1000; i++) {
next = {};
cur.next = next;
cur = next;
}
root;""")
self.assertRaises(exceptions.EvaluateException, test)
def testRuntimeExecuteOfSomethingThatCantJSONize(self):
self._tab.ExecuteJavaScript('window')
|
bsd-3-clause
|
MiltosD/CEFELRC
|
lib/python2.7/site-packages/django/middleware/http.py
|
154
|
1696
|
from django.core.exceptions import MiddlewareNotUsed
from django.utils.http import http_date, parse_http_date_safe
class ConditionalGetMiddleware(object):
"""
    Handles conditional GET operations. If the response has an ETag or
Last-Modified header, and the request has If-None-Match or
If-Modified-Since, the response is replaced by an HttpNotModified.
Also sets the Date and Content-Length response-headers.
"""
def process_response(self, request, response):
response['Date'] = http_date()
if not response.has_header('Content-Length'):
response['Content-Length'] = str(len(response.content))
if response.has_header('ETag'):
if_none_match = request.META.get('HTTP_IF_NONE_MATCH')
if if_none_match == response['ETag']:
# Setting the status is enough here. The response handling path
# automatically removes content for this status code (in
# http.conditional_content_removal()).
response.status_code = 304
if response.has_header('Last-Modified'):
if_modified_since = request.META.get('HTTP_IF_MODIFIED_SINCE')
if if_modified_since is not None:
if_modified_since = parse_http_date_safe(if_modified_since)
if if_modified_since is not None:
last_modified = parse_http_date_safe(response['Last-Modified'])
if last_modified is not None and last_modified <= if_modified_since:
# Setting the status code is enough here (same reasons as
# above).
response.status_code = 304
return response
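# A usage sketch: the middleware is enabled through the standard settings
# list (its position relative to other middleware is left to the project):
#
#   MIDDLEWARE_CLASSES = (
#       'django.middleware.http.ConditionalGetMiddleware',
#       # ...
#   )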
|
bsd-3-clause
|
littley/pyvolution
|
internal/BreedingPool.py
|
1
|
3508
|
import random
import math
class BreedingPool(object):
"""
    A container for Chromosomes that allows efficient, fitness-weighted
    selection of chromosomes to be "bred". Implemented as a binary tree.
"""
class node():
"""
Each node represents the "value" of a single chromosome. Used to map a random number to a particular chromosome
"""
def __init__(self, minVal, maxVal, chromosome):
self.min = minVal
self.max = maxVal
self.chromosome = chromosome
self.left = None
self.right = None
def __eq__(self, other):
"""
            Given a floating point number, a node will return True for "==" if the number
            falls within the node's [min, max] range
"""
if type(other) is type(self):
return self.min == other.min
return self.min <= other and self.max >= other
def __ne__(self, other):
if type(other) is type(self):
return self.min != other.min
return not self.__eq__(other)
def __lt__(self, other):
if type(other) is type(self):
return self.min < other.min
return self.max < other
def __gt__(self, other):
if type(other) is type(self):
return self.min > other.min
return self.min > other
def __le__(self, other):
if type(other) is type(self):
return self.min <= other.min
return self.__eq__(other) or self.__lt__(other)
def __ge__(self, other):
if type(other) is type(self):
return self.min >= other.min
            return self.__eq__(other) or self.__gt__(other)
def __init__(self, population):
allNodes = []
self.max = 0.0 #used to track the maximum value, used by random number generator
for chromosome in population:
increase = chromosome.getFitness()
allNodes.append(self.node(self.max, self.max + increase, chromosome))
self.max += increase
allNodes.sort()
self.root = self.makeTree(allNodes)
def makeTree(self, pop):
"""
Given a sorted list of nodes, recursively construct a binary tree
:param pop: a sorted list of nodes
:return: the root of the tree
"""
if len(pop) == 0:
return None
elif len(pop) == 1:
return pop[0]
middleIndex = int(math.floor(len(pop) / 2))
leftList = pop[:middleIndex]
root = pop[middleIndex]
rightList = pop[middleIndex+1:]
root.left = self.makeTree(leftList)
root.right = self.makeTree(rightList)
return root
def findChromosome(self, n, target):
"""
Recursively search the tree for a chromosome
:param n: a node in the tree
:type n: node
:param target: look for a node that equals this target
:type target: float
:rtype: Chromosome
"""
if n is None:
return None
if n == target:
return n.chromosome
elif target < n:
return self.findChromosome(n.left, target)
elif target > n:
return self.findChromosome(n.right, target)
else:
return None
def get(self):
val = random.uniform(0, self.max)
return self.findChromosome(self.root, val)
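# A usage sketch (FakeChromosome is a hypothetical stand-in; any object
# exposing getFitness() works):
#
#   class FakeChromosome(object):
#       def __init__(self, fitness):
#           self.fitness = fitness
#       def getFitness(self):
#           return self.fitness
#
#   pool = BreedingPool([FakeChromosome(1.0), FakeChromosome(3.0)])
#   parent = pool.get()  # the fitter chromosome is drawn about 3x as often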
|
apache-2.0
|
GodBlessPP/w16b_test
|
static/Brython3.1.3-20150514-095342/Lib/numbers.py
|
883
|
10398
|
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) for numbers, according to PEP 3141.
TODO: Fill out more detailed documentation on the operators."""
from abc import ABCMeta, abstractmethod
__all__ = ["Number", "Complex", "Real", "Rational", "Integral"]
class Number(metaclass=ABCMeta):
"""All numbers inherit from this class.
If you just want to check if an argument x is a number, without
caring what kind, use isinstance(x, Number).
"""
__slots__ = ()
# Concrete numeric types must provide their own hash implementation
__hash__ = None
## Notes on Decimal
## ----------------
## Decimal has all of the methods specified by the Real abc, but it should
## not be registered as a Real because decimals do not interoperate with
## binary floats (i.e. Decimal('3.14') + 2.71828 is undefined). But,
## abstract reals are expected to interoperate (i.e. R1 + R2 should be
## expected to work if R1 and R2 are both Reals).
class Complex(Number):
"""Complex defines the operations that work on the builtin complex type.
In short, those are: a conversion to complex, .real, .imag, +, -,
*, /, abs(), .conjugate, ==, and !=.
    If it is given heterogeneous arguments, and doesn't have special
knowledge about them, it should fall back to the builtin complex
type as described below.
"""
__slots__ = ()
@abstractmethod
def __complex__(self):
"""Return a builtin complex instance. Called for complex(self)."""
def __bool__(self):
"""True if self != 0. Called for bool(self)."""
return self != 0
@property
@abstractmethod
def real(self):
"""Retrieve the real component of this number.
This should subclass Real.
"""
raise NotImplementedError
@property
@abstractmethod
def imag(self):
"""Retrieve the imaginary component of this number.
This should subclass Real.
"""
raise NotImplementedError
@abstractmethod
def __add__(self, other):
"""self + other"""
raise NotImplementedError
@abstractmethod
def __radd__(self, other):
"""other + self"""
raise NotImplementedError
@abstractmethod
def __neg__(self):
"""-self"""
raise NotImplementedError
@abstractmethod
def __pos__(self):
"""+self"""
raise NotImplementedError
def __sub__(self, other):
"""self - other"""
return self + -other
def __rsub__(self, other):
"""other - self"""
return -self + other
@abstractmethod
def __mul__(self, other):
"""self * other"""
raise NotImplementedError
@abstractmethod
def __rmul__(self, other):
"""other * self"""
raise NotImplementedError
@abstractmethod
def __truediv__(self, other):
"""self / other: Should promote to float when necessary."""
raise NotImplementedError
@abstractmethod
def __rtruediv__(self, other):
"""other / self"""
raise NotImplementedError
@abstractmethod
def __pow__(self, exponent):
"""self**exponent; should promote to float or complex when necessary."""
raise NotImplementedError
@abstractmethod
def __rpow__(self, base):
"""base ** self"""
raise NotImplementedError
@abstractmethod
def __abs__(self):
"""Returns the Real distance from 0. Called for abs(self)."""
raise NotImplementedError
@abstractmethod
def conjugate(self):
"""(x+y*i).conjugate() returns (x-y*i)."""
raise NotImplementedError
@abstractmethod
def __eq__(self, other):
"""self == other"""
raise NotImplementedError
def __ne__(self, other):
"""self != other"""
# The default __ne__ doesn't negate __eq__ until 3.0.
return not (self == other)
Complex.register(complex)
class Real(Complex):
"""To Complex, Real adds the operations that work on real numbers.
In short, those are: a conversion to float, trunc(), divmod,
%, <, <=, >, and >=.
Real also provides defaults for the derived operations.
"""
__slots__ = ()
@abstractmethod
def __float__(self):
"""Any Real can be converted to a native float object.
Called for float(self)."""
raise NotImplementedError
@abstractmethod
def __trunc__(self):
"""trunc(self): Truncates self to an Integral.
Returns an Integral i such that:
* i>0 iff self>0;
* abs(i) <= abs(self);
* for any Integral j satisfying the first two conditions,
abs(i) >= abs(j) [i.e. i has "maximal" abs among those].
i.e. "truncate towards 0".
"""
raise NotImplementedError
@abstractmethod
def __floor__(self):
"""Finds the greatest Integral <= self."""
raise NotImplementedError
@abstractmethod
def __ceil__(self):
"""Finds the least Integral >= self."""
raise NotImplementedError
@abstractmethod
def __round__(self, ndigits=None):
"""Rounds self to ndigits decimal places, defaulting to 0.
If ndigits is omitted or None, returns an Integral, otherwise
returns a Real. Rounds half toward even.
"""
raise NotImplementedError
def __divmod__(self, other):
"""divmod(self, other): The pair (self // other, self % other).
Sometimes this can be computed faster than the pair of
operations.
"""
return (self // other, self % other)
def __rdivmod__(self, other):
"""divmod(other, self): The pair (self // other, self % other).
Sometimes this can be computed faster than the pair of
operations.
"""
return (other // self, other % self)
@abstractmethod
def __floordiv__(self, other):
"""self // other: The floor() of self/other."""
raise NotImplementedError
@abstractmethod
def __rfloordiv__(self, other):
"""other // self: The floor() of other/self."""
raise NotImplementedError
@abstractmethod
def __mod__(self, other):
"""self % other"""
raise NotImplementedError
@abstractmethod
def __rmod__(self, other):
"""other % self"""
raise NotImplementedError
@abstractmethod
def __lt__(self, other):
"""self < other
< on Reals defines a total ordering, except perhaps for NaN."""
raise NotImplementedError
@abstractmethod
def __le__(self, other):
"""self <= other"""
raise NotImplementedError
# Concrete implementations of Complex abstract methods.
def __complex__(self):
"""complex(self) == complex(float(self), 0)"""
return complex(float(self))
@property
def real(self):
"""Real numbers are their real component."""
return +self
@property
def imag(self):
"""Real numbers have no imaginary component."""
return 0
def conjugate(self):
"""Conjugate is a no-op for Reals."""
return +self
Real.register(float)
class Rational(Real):
""".numerator and .denominator should be in lowest terms."""
__slots__ = ()
@property
@abstractmethod
def numerator(self):
raise NotImplementedError
@property
@abstractmethod
def denominator(self):
raise NotImplementedError
# Concrete implementation of Real's conversion to float.
def __float__(self):
"""float(self) = self.numerator / self.denominator
It's important that this conversion use the integer's "true"
division rather than casting one side to float before dividing
so that ratios of huge integers convert without overflowing.
"""
return self.numerator / self.denominator
class Integral(Rational):
"""Integral adds a conversion to int and the bit-string operations."""
__slots__ = ()
@abstractmethod
def __int__(self):
"""int(self)"""
raise NotImplementedError
def __index__(self):
"""Called whenever an index is needed, such as in slicing"""
return int(self)
@abstractmethod
def __pow__(self, exponent, modulus=None):
"""self ** exponent % modulus, but maybe faster.
Accept the modulus argument if you want to support the
3-argument version of pow(). Raise a TypeError if exponent < 0
or any argument isn't Integral. Otherwise, just implement the
2-argument version described in Complex.
"""
raise NotImplementedError
@abstractmethod
def __lshift__(self, other):
"""self << other"""
raise NotImplementedError
@abstractmethod
def __rlshift__(self, other):
"""other << self"""
raise NotImplementedError
@abstractmethod
def __rshift__(self, other):
"""self >> other"""
raise NotImplementedError
@abstractmethod
def __rrshift__(self, other):
"""other >> self"""
raise NotImplementedError
@abstractmethod
def __and__(self, other):
"""self & other"""
raise NotImplementedError
@abstractmethod
def __rand__(self, other):
"""other & self"""
raise NotImplementedError
@abstractmethod
def __xor__(self, other):
"""self ^ other"""
raise NotImplementedError
@abstractmethod
def __rxor__(self, other):
"""other ^ self"""
raise NotImplementedError
@abstractmethod
def __or__(self, other):
"""self | other"""
raise NotImplementedError
@abstractmethod
def __ror__(self, other):
"""other | self"""
raise NotImplementedError
@abstractmethod
def __invert__(self):
"""~self"""
raise NotImplementedError
# Concrete implementations of Rational and Real abstract methods.
def __float__(self):
"""float(self) == float(int(self))"""
return float(int(self))
@property
def numerator(self):
"""Integers are their own numerators."""
return +self
@property
def denominator(self):
"""Integers have a denominator of 1."""
return 1
Integral.register(int)
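# A brief illustration (not part of the original module): the ABCs are meant
# for isinstance() checks across the numeric tower, e.g.
#   isinstance(3, Integral)     # True: int is registered above
#   isinstance(3.14, Rational)  # False: float is registered only as Real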
|
agpl-3.0
|
kw217/omim
|
3party/protobuf/python/google/protobuf/internal/message_python_test.py
|
74
|
2359
|
#! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for ..public.message for the pure Python implementation."""
import os
os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'python'
# We must set the implementation version above before the google3 imports.
# pylint: disable=g-import-not-at-top
from google.apputils import basetest
from google.protobuf.internal import api_implementation
# Run all tests from the original module by putting them in our namespace.
# pylint: disable=wildcard-import
from google.protobuf.internal.message_test import *
class ConfirmPurePythonTest(basetest.TestCase):
def testImplementationSetting(self):
self.assertEqual('python', api_implementation.Type())
if __name__ == '__main__':
basetest.main()
|
apache-2.0
|
nekulin/arangodb
|
3rdParty/V8-4.3.61/third_party/python_26/Lib/distutils/cygwinccompiler.py
|
50
|
17299
|
"""distutils.cygwinccompiler
Provides the CygwinCCompiler class, a subclass of UnixCCompiler that
handles the Cygwin port of the GNU C compiler to Windows. It also contains
the Mingw32CCompiler class which handles the mingw32 port of GCC (same as
cygwin in no-cygwin mode).
"""
# problems:
#
# * if you use a msvc compiled python version (1.5.2)
# 1. you have to insert a __GNUC__ section in its config.h
# 2. you have to generate a import library for its dll
# - create a def-file for python??.dll
# - create a import library using
# dlltool --dllname python15.dll --def python15.def \
# --output-lib libpython15.a
#
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
#
# * We put export_symbols in a def-file, and don't use
# --export-all-symbols because it didn't work reliably in some
# tested configurations. And because other windows compilers also
# need their symbols specified, this is no serious problem.
#
# tested configurations:
#
# * cygwin gcc 2.91.57/ld 2.9.4/dllwrap 0.2.4 works
# (after patching python's config.h and for C++ some other include files)
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
# * mingw32 gcc 2.95.2/ld 2.9.4/dllwrap 0.2.4 works
# (ld doesn't support -shared, so we use dllwrap)
# * cygwin gcc 2.95.2/ld 2.10.90/dllwrap 2.10.90 works now
# - its dllwrap doesn't work, there is a bug in binutils 2.10.90
# see also http://sources.redhat.com/ml/cygwin/2000-06/msg01274.html
# - using gcc -mdll instead of dllwrap doesn't work without -static because
# it tries to link against dlls instead of their import libraries. (If
# it finds the dll first.)
# By specifying -static we force ld to link against the import libraries;
# this is the windows standard, and the necessary symbols are normally not
# present in the dlls.
# *** only the version of June 2000 shows these problems
# * cygwin gcc 3.2/ld 2.13.90 works
# (ld supports -shared)
# * mingw gcc 3.2/ld 2.13 works
# (ld supports -shared)
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: cygwinccompiler.py 65834 2008-08-18 19:23:47Z amaury.forgeotdarc $"
import os,sys,copy
from distutils.ccompiler import gen_preprocess_options, gen_lib_options
from distutils.unixccompiler import UnixCCompiler
from distutils.file_util import write_file
from distutils.errors import DistutilsExecError, CompileError, UnknownFileError
from distutils import log
def get_msvcr():
"""Include the appropriate MSVC runtime library if Python was built
with MSVC 7.0 or later.
"""
msc_pos = sys.version.find('MSC v.')
if msc_pos != -1:
msc_ver = sys.version[msc_pos+6:msc_pos+10]
if msc_ver == '1300':
# MSVC 7.0
return ['msvcr70']
elif msc_ver == '1310':
# MSVC 7.1
return ['msvcr71']
elif msc_ver == '1400':
# VS2005 / MSVC 8.0
return ['msvcr80']
elif msc_ver == '1500':
# VS2008 / MSVC 9.0
return ['msvcr90']
else:
raise ValueError("Unknown MS Compiler version %i " % msc_Ver)
class CygwinCCompiler (UnixCCompiler):
compiler_type = 'cygwin'
obj_extension = ".o"
static_lib_extension = ".a"
shared_lib_extension = ".dll"
static_lib_format = "lib%s%s"
shared_lib_format = "%s%s"
exe_extension = ".exe"
def __init__ (self, verbose=0, dry_run=0, force=0):
UnixCCompiler.__init__ (self, verbose, dry_run, force)
(status, details) = check_config_h()
self.debug_print("Python's GCC status: %s (details: %s)" %
(status, details))
if status is not CONFIG_H_OK:
self.warn(
"Python's pyconfig.h doesn't seem to support your compiler. "
"Reason: %s. "
"Compiling may fail because of undefined preprocessor macros."
% details)
self.gcc_version, self.ld_version, self.dllwrap_version = \
get_versions()
self.debug_print(self.compiler_type + ": gcc %s, ld %s, dllwrap %s\n" %
(self.gcc_version,
self.ld_version,
self.dllwrap_version) )
# ld_version >= "2.10.90" and < "2.13" should also be able to use
# gcc -mdll instead of dllwrap
# Older dllwraps had own version numbers, newer ones use the
# same as the rest of binutils ( also ld )
# dllwrap 2.10.90 is buggy
if self.ld_version >= "2.10.90":
self.linker_dll = "gcc"
else:
self.linker_dll = "dllwrap"
# ld_version >= "2.13" support -shared so use it instead of
# -mdll -static
if self.ld_version >= "2.13":
shared_option = "-shared"
else:
shared_option = "-mdll -static"
# Hard-code GCC because that's what this is all about.
# XXX optimization, warnings etc. should be customizable.
self.set_executables(compiler='gcc -mcygwin -O -Wall',
compiler_so='gcc -mcygwin -mdll -O -Wall',
compiler_cxx='g++ -mcygwin -O -Wall',
linker_exe='gcc -mcygwin',
linker_so=('%s -mcygwin %s' %
(self.linker_dll, shared_option)))
# cygwin and mingw32 need different sets of libraries
if self.gcc_version == "2.91.57":
# cygwin shouldn't need msvcrt, but without the dlls will crash
# (gcc version 2.91.57) -- perhaps something about initialization
self.dll_libraries=["msvcrt"]
self.warn(
"Consider upgrading to a newer version of gcc")
else:
# Include the appropriate MSVC runtime library if Python was built
# with MSVC 7.0 or later.
self.dll_libraries = get_msvcr()
# __init__ ()
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
if ext == '.rc' or ext == '.res':
# gcc needs '.res' and '.rc' compiled to object files !!!
try:
self.spawn(["windres", "-i", src, "-o", obj])
except DistutilsExecError, msg:
raise CompileError, msg
else: # for other files use the C-compiler
try:
self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
extra_postargs)
except DistutilsExecError, msg:
raise CompileError, msg
def link (self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
# use separate copies, so we can modify the lists
extra_preargs = copy.copy(extra_preargs or [])
libraries = copy.copy(libraries or [])
objects = copy.copy(objects or [])
# Additional libraries
libraries.extend(self.dll_libraries)
# handle export symbols by creating a def-file
# with executables this only works with gcc/ld as linker
if ((export_symbols is not None) and
(target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
# (The linker doesn't do anything if output is up-to-date.
            # So it would probably be better to check if we really need this,
# but for this we had to insert some unchanged parts of
# UnixCCompiler, and this is not what we want.)
# we want to put some files in the same directory as the
# object files are, build_temp doesn't help much
# where are the object files
temp_dir = os.path.dirname(objects[0])
# name of dll to give the helper files the same base name
(dll_name, dll_extension) = os.path.splitext(
os.path.basename(output_filename))
# generate the filenames for these files
def_file = os.path.join(temp_dir, dll_name + ".def")
lib_file = os.path.join(temp_dir, 'lib' + dll_name + ".a")
# Generate .def file
contents = [
"LIBRARY %s" % os.path.basename(output_filename),
"EXPORTS"]
for sym in export_symbols:
contents.append(sym)
self.execute(write_file, (def_file, contents),
"writing %s" % def_file)
# next add options for def-file and to creating import libraries
# dllwrap uses different options than gcc/ld
if self.linker_dll == "dllwrap":
extra_preargs.extend(["--output-lib", lib_file])
# for dllwrap we have to use a special option
extra_preargs.extend(["--def", def_file])
# we use gcc/ld here and can be sure ld is >= 2.9.10
else:
# doesn't work: bfd_close build\...\libfoo.a: Invalid operation
#extra_preargs.extend(["-Wl,--out-implib,%s" % lib_file])
# for gcc/ld the def-file is specified as any object files
objects.append(def_file)
#end: if ((export_symbols is not None) and
# (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
        # whoever wants symbols and a many-times-larger output file
        # should explicitly switch the debug mode on;
        # otherwise we let dllwrap/ld strip the output file
# (On my machine: 10KB < stripped_file < ??100KB
# unstripped_file = stripped_file + XXX KB
# ( XXX=254 for a typical python extension))
if not debug:
extra_preargs.append("-s")
UnixCCompiler.link(self,
target_desc,
objects,
output_filename,
output_dir,
libraries,
library_dirs,
runtime_library_dirs,
None, # export_symbols, we do this in our def-file
debug,
extra_preargs,
extra_postargs,
build_temp,
target_lang)
# link ()
# -- Miscellaneous methods -----------------------------------------
# overwrite the one from CCompiler to support rc and res-files
def object_filenames (self,
source_filenames,
strip_dir=0,
output_dir=''):
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
# use normcase to make sure '.rc' is really '.rc' and not '.RC'
(base, ext) = os.path.splitext (os.path.normcase(src_name))
if ext not in (self.src_extensions + ['.rc','.res']):
raise UnknownFileError, \
"unknown file type '%s' (from '%s')" % \
(ext, src_name)
if strip_dir:
base = os.path.basename (base)
if ext == '.res' or ext == '.rc':
# these need to be compiled to object files
obj_names.append (os.path.join (output_dir,
base + ext + self.obj_extension))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
# object_filenames ()
# class CygwinCCompiler
# the same as cygwin plus some additional parameters
class Mingw32CCompiler (CygwinCCompiler):
compiler_type = 'mingw32'
def __init__ (self,
verbose=0,
dry_run=0,
force=0):
CygwinCCompiler.__init__ (self, verbose, dry_run, force)
# ld_version >= "2.13" support -shared so use it instead of
# -mdll -static
if self.ld_version >= "2.13":
shared_option = "-shared"
else:
shared_option = "-mdll -static"
# A real mingw32 doesn't need to specify a different entry point,
# but cygwin 2.91.57 in no-cygwin-mode needs it.
if self.gcc_version <= "2.91.57":
entry_point = '--entry _DllMain@12'
else:
entry_point = ''
self.set_executables(compiler='gcc -mno-cygwin -O -Wall',
compiler_so='gcc -mno-cygwin -mdll -O -Wall',
compiler_cxx='g++ -mno-cygwin -O -Wall',
linker_exe='gcc -mno-cygwin',
linker_so='%s -mno-cygwin %s %s'
% (self.linker_dll, shared_option,
entry_point))
# Maybe we should also append -mthreads, but then the finished
# dlls need another dll (mingwm10.dll see Mingw32 docs)
# (-mthreads: Support thread-safe exception handling on `Mingw32')
# no additional libraries needed
self.dll_libraries=[]
# Include the appropriate MSVC runtime library if Python was built
# with MSVC 7.0 or later.
self.dll_libraries = get_msvcr()
# __init__ ()
# class Mingw32CCompiler
# Because these compilers aren't configured in Python's pyconfig.h file by
# default, we should at least warn the user if he is using an unmodified
# version.
CONFIG_H_OK = "ok"
CONFIG_H_NOTOK = "not ok"
CONFIG_H_UNCERTAIN = "uncertain"
def check_config_h():
"""Check if the current Python installation (specifically, pyconfig.h)
appears amenable to building extensions with GCC. Returns a tuple
(status, details), where 'status' is one of the following constants:
CONFIG_H_OK
all is well, go ahead and compile
CONFIG_H_NOTOK
doesn't look good
CONFIG_H_UNCERTAIN
not sure -- unable to read pyconfig.h
'details' is a human-readable string explaining the situation.
Note there are two ways to conclude "OK": either 'sys.version' contains
the string "GCC" (implying that this Python was built with GCC), or the
installed "pyconfig.h" contains the string "__GNUC__".
"""
# XXX since this function also checks sys.version, it's not strictly a
# "pyconfig.h" check -- should probably be renamed...
from distutils import sysconfig
import string
# if sys.version contains GCC then python was compiled with
# GCC, and the pyconfig.h file should be OK
if string.find(sys.version,"GCC") >= 0:
return (CONFIG_H_OK, "sys.version mentions 'GCC'")
fn = sysconfig.get_config_h_filename()
try:
        # It would probably be better to read single lines and search them.
# But we do this only once, and it is fast enough
f = open(fn)
s = f.read()
f.close()
except IOError, exc:
# if we can't read this file, we cannot say it is wrong
# the compiler will complain later about this file as missing
return (CONFIG_H_UNCERTAIN,
"couldn't read '%s': %s" % (fn, exc.strerror))
else:
# "pyconfig.h" contains an "#ifdef __GNUC__" or something similar
if string.find(s,"__GNUC__") >= 0:
return (CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn)
else:
return (CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn)
def get_versions():
""" Try to find out the versions of gcc, ld and dllwrap.
    If a version cannot be determined, None is returned for it.
"""
from distutils.version import LooseVersion
from distutils.spawn import find_executable
import re
gcc_exe = find_executable('gcc')
if gcc_exe:
out = os.popen(gcc_exe + ' -dumpversion','r')
out_string = out.read()
out.close()
result = re.search('(\d+\.\d+(\.\d+)*)',out_string)
if result:
gcc_version = LooseVersion(result.group(1))
else:
gcc_version = None
else:
gcc_version = None
ld_exe = find_executable('ld')
if ld_exe:
out = os.popen(ld_exe + ' -v','r')
out_string = out.read()
out.close()
result = re.search('(\d+\.\d+(\.\d+)*)',out_string)
if result:
ld_version = LooseVersion(result.group(1))
else:
ld_version = None
else:
ld_version = None
dllwrap_exe = find_executable('dllwrap')
if dllwrap_exe:
out = os.popen(dllwrap_exe + ' --version','r')
out_string = out.read()
out.close()
result = re.search(' (\d+\.\d+(\.\d+)*)',out_string)
if result:
dllwrap_version = LooseVersion(result.group(1))
else:
dllwrap_version = None
else:
dllwrap_version = None
return (gcc_version, ld_version, dllwrap_version)
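# A usage sketch: each element is a LooseVersion (or None when the tool is
# not found on PATH), so the results compare naturally against strings:
#
#   gcc_version, ld_version, dllwrap_version = get_versions()
#   if ld_version is not None and ld_version >= "2.13":
#       shared_option = "-shared"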
|
apache-2.0
|
pixelated-project/pixelated-user-agent
|
service/test/functional/features/page_objects/backup_account_page.py
|
2
|
1111
|
#
# Copyright (c) 2017 ThoughtWorks, Inc.
#
# Pixelated is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pixelated is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Pixelated. If not, see <http://www.gnu.org/licenses/>.
from base_page import BasePage
class BackupAccountPage(BasePage):
def __init__(self, context):
super(BackupAccountPage, self).__init__(context, context.backup_account_url)
self._locators = {
'logout_button': 'button[name="logout"]'
}
def logout(self):
logout_button = self.find_element_by_css_selector(self._locators['logout_button'])
logout_button.click()
|
agpl-3.0
|
gnowledge/ncert_nroer
|
objectapp/cnlgb.py
|
3
|
21411
|
from objectapp.models import *
from gstudio.models import *
from django.template.defaultfilters import slugify
import inflect
def get_cnlgb_list(self):
    z = []
    k = get_lex_sentence(self)
    if k:
        z.extend(k)
    l = get_lex_sentence_optional(self)
    if l:
        z.extend(l)
    m = get_CNL_dependency(self)
    if m:
        z.extend(m)
    n = membership_sentence(self)
    if n:
        z.extend(n)
    o = get_attr_sentence(self)
    if o:
        z.extend(o)
    rel = get_rel(self)
    if rel:
        z.extend(rel)
    return z
def advanced_cnlgb(self):
    zz = []
    p = get_CNL_sentence_authors(self)
    if p:
        zz.extend(p)
    # Generates the CNL sentence for RT/R
    rt = get_RT_sentence(self)
    if rt:
        zz.extend(rt)
    return zz
def get_lex_sentence(self):
    if self.ref.__class__.__name__ != 'Gbobject':
pass
else:
cns=self.get_nbh
d=[]
for k in cns:
if k=='title':
title = str(cns['title'])
title_slug = slugify(title)
if not cns[k]:
pass
else:
g=str(title_slug)+" is a proper-noun."
d.append(g.capitalize())
return d
def get_lex_sentence_optional(self):
    if self.ref.__class__.__name__ != 'Gbobject':
pass
else:
cns=self.get_nbh
title = str(cns['title'])
title_slug = slugify(title)
d=[]
for k in cns:
if k =='altnames':
alt_name = str(cns['altnames'])
alt_slug = slugify(alt_name)
if not cns[k]:
pass
else:
i=str(cns[k])+" is an alternate name for "+str(title_slug)+"."
d.append(i.capitalize())
elif k == 'plural':
if not cns[k]:
pass
else:
pl = str(cns[k])
pl_slug = slugify(pl)
m = str(pl_slug)+" is a plural of "+str(title_slug)+"."
d.append(m.capitalize())
return d
def get_CNL_sentence_authors(self):
    if self.ref.__class__.__name__ != 'Gbobject':
pass
else:
title = self.title
title_slug = slugify(title)
d=[]
if self.authors.all():
auth = []
auth = self.authors.all()
len_auth=len(auth)
if len_auth == 1:
for each in auth:
aut=slugify(each)
e=str(aut).title()+" is an author to "+str(title_slug).title()+"."
d.append(e)
else:
##print "len not 1"
y=[]
for each in self.authors.all():
a=each
a_slug=slugify(a)
y.append(a_slug)
for e_i in y:
if y.index(e_i) == 0:
sen = str(e_i)
else:
sen = str(sen)+" and "+str(e_i)
e = str(sen).title()+" are all chosen authors to "+str(title_slug).title()+"."
d.append(e)
return d
def get_CNL_dependency(self):
    if self.ref.__class__.__name__ != 'Gbobject':
pass
else:
title = self.title
title_slug = slugify(title)
d=[]
if self.prior_nodes.all():
p_n_a = []
p_n_a = self.prior_nodes.all()
len_pna=len(p_n_a)
if len_pna == 1:
for each in p_n_a:
pn=slugify(each)
                    h = str(pn).title()+" is a prior_node for "+str(title_slug).title()+". "+str(title_slug).title()+" depends upon "+str(pn).title()+"."
d.append(h)
else:
sen = dependency_plural(p_n_a)
h = str(sen)+". It is the prior_node and required for the meaning of "+str(title_slug).title()+"."
d.append(h)
if self.posterior_nodes.all():
p_n_a = []
p_n_a = self.posterior_nodes.all()
len_pna=len(p_n_a)
if len_pna == 1:
for each in p_n_a:
pn = slugify(each)
                    p = str(pn).title()+" is a posterior_node for "+str(title_slug).title()+". "+str(title_slug).title()+" is required for the meaning of "+str(pn).title()+"."
d.append(p)
else:
sen = dependency_plural(p_n_a)
p = str(sen)+". It is the posterior_node and depends on the meaning of "+str(title_slug).title()+"."
d.append(p)
return d
#Generates dependency sentence for plural
def dependency_plural(p_n_a):
for each in p_n_a:
a = each
each_r = each.ref.__class__.__name__
a_slug = slugify(a)
y=[]
for a_slug in p_n_a:
if len(y) == 0:
#print "If Y is empty, for first item"
if each_r == 'Relationtype':
b_slug = str(a_slug).title()+" is an adjective"
else:
b_slug = str(a_slug).title()+" is a proper-noun"
y.append(b_slug)
else:
if each_r != 'Relationtype':
#print "Its not a relation_type, but a proper-noun"
aa_slug = str(a_slug).title()+" is a proper-noun"
y.append(aa_slug)
else:
#print "It is a relationtype"
ab_slug = str(a_slug).title()+" is an adjective"
y.append(ab_slug)
for e_i in y:
if y.index(e_i) == 0:
sen = str(e_i)
else:
sen = str(sen)+" and "+str(e_i)
return sen
def membership_sentence(self):
"""Returns CNL sentences for membership"""
    if self.ref.__class__.__name__ != 'Gbobject':
pass
else:
cns=self.get_nbh
title = str(cns['title'])
title_slug = slugify(title)
d=[]
for k in cns:
            if self.ref.__class__.__name__ == 'Gbobject':
if k=='member_of':
if not cns[k]:
pass
else:
cmm = []
cmm = self.objecttypes.all()
len_cmm=len(cmm)
if len_cmm == 1:
for each in cmm:
cm=slugify(each)
j=str(title_slug)+" is a member of a "+str(cm)+"."
d.append(j.capitalize())
else:
#print "len not 1"
y=[]
for each in self.objecttypes.all():
a=each
a_slug=slugify(a)
y.append(a_slug)
for e_i in y:
if y.index(e_i) == 0:
sen = str(e_i)
else:
sen = str(sen)+" and a "+str(e_i)
j= str(title_slug)+" is a member of a "+sen+"."
d.append(j.capitalize())
return d
#Returns attributes for the given Gbobject
def get_attr_sentence(self):
    if self.ref.__class__.__name__ != 'Gbobject':
pass
else:
at = Gbobject.get_attributes(self)
if at:
a = []
title = self.title
for k,v in at.iteritems():
attr = k
for each in v:
value = each
sen = "The "+str(attr)+" of "+str(title)+" is "+str(value)+"."
a.append(sen)
return a
def get_list_relation(self, lr):
"""Returns the list of relations"""
lst = []
gbr = self.get_relations1
if not gbr:
pass
else:
for k,v in gbr.iteritems():
if k == 'lrelations':
val_l = v
if k == 'rrelations':
val_r = v
if lr == 0:
return val_l
elif lr == 1:
return val_r
def get_CNL_sentence_RT(self, lst, rst, detail_level):
    if self.ref.__class__.__name__ == 'Relationtype':
core = [] #core data list
core_t = []
core_i = []
reflexive = []
adv = [] # advanced data list
title=self.title
title_slug=slugify(title)
inverse=self.inverse
inverse_slug=slugify(inverse)
is_symmetrical=self.is_symmetrical
is_reflexive=self.is_reflexive
llist = []
rlist = []
llist = lst
rlist = rst
#Flag variable that checks if plural-left,right
plural_l = 0
plural_r = 0
if isinstance(llist,list):
"""If llist is a list"""
ll = []
for each in llist:
            if each.ref.__class__.__name__ != 'Gbobject':
"""Common-noun"""
lst = "a "+str(each).lower()
else:
"""Proper-noun"""
lst = str(each).title()
ll.append(lst)
if len(ll) == 1:
for e in ll:
left_subtype = e
else:
"""Since left-ST is plural, flag value assigned 1 """
plural_l = 1
for e in ll:
if ll.index(e)==0:
sen = str(e)
else:
sen = str(sen)+" and "+str(e)
left_subtype = sen
else:
"""If llist is not a list"""
        if lst.ref.__class__.__name__ == 'Gbobject':
left_subtype = lst
else:
left_subtype = "a "+str(lst)
if isinstance(rlist,list):
"""If rlist is a list"""
rl = []
#print "Is a list"
for each in rlist:
            if each.ref.__class__.__name__ != 'Gbobject':
"""Common-noun"""
rst = "a "+str(each)
else:
rst = each
rl.append(rst)
if len(rl) == 1:
for e in rl:
right_subtype = e
else:
"""Since right-ST is plural, flag value assigned 1 """
plural_r = 1
for e in rl:
if rl.index(e)==0:
sen = str(e)
else:
sen = str(sen)+" and "+str(e)
right_subtype = sen
else:
"""If Rlist is not a list"""
            if rst.ref.__class__.__name__ == 'Gbobject':
right_subtype = rst
else:
right_subtype = "a "+str(rst)
#Core sentence - title
rel = rel_CNL(self, left_subtype, right_subtype, plural_l)
core_t.extend(rel)
rlex = rel_lex_sentence(self)
adv.extend(rlex)
app_NT = get_app_NT(self)
adv.extend(app_NT)
st = get_RT_subjecttype(self, left_subtype, right_subtype)
adv.extend(st)
#Is symmetrical
if is_symmetrical:
symm = is_symmetrical_RT(self, left_subtype, right_subtype, plural_r)
core_i.extend(symm)
else:
asymm = is_asymmetrical_RT(self, left_subtype, right_subtype, plural_r)
core_i.extend(asymm)
#Is reflexive
if not is_reflexive:
pass
else:
if detail_level == 1 or plural_l == 1:
st = right_subtype
else:
st = left_subtype
is_refl = is_reflexive_sentence(self, st)
reflexive.extend(is_refl)
if detail_level==0:
#Title,Reflexive
for e in core_t:
a = e
reflexive.insert(0, a)
return reflexive
elif detail_level==1:
#Inverse,reflexive
for e in core_i:
a = e
reflexive.insert(0, a)
return reflexive
elif detail_level==2:
#Title, Inverse & Reflexive
core.extend(core_t)
core.extend(core_i)
core.extend(reflexive)
return core
elif detail_level==3:
#Return advanced grammatical information
return adv
elif detail_level==4:
#Return all info - Core & Advanced Info
newlist = []
newlist.extend(core)
newlist.extend(adv)
return newlist
#Checks if the RT title is a transitive verb (finite singular) or an intransitive adjective
def istv_title(self):
p = inflect.engine()
from django.template.defaultfilters import slugify
destination = open( "/home/user/gnowsys-studio/demo/aFile.pl", "r+" )
f = destination.read()
a_t = self.title
a = slugify(a_t)
if '-' not in a:
if a[-1] == 's':
a_s = p.singular_noun(a)
a_lex = "tv_finsg("+a+", "+str(a_s)+")."
strpos = f.find(a_lex)
if strpos != -1:
return True
else:
return False
#Checks if the RT inverse is a transitive verb (finite singular) or an intransitive adjective
def istv_inverse(self):
p = inflect.engine()
destination = open( "/home/user/gnowsys-studio/demo/aFile.pl", "r+" )
f = destination.read()
a_t = self.inverse
a = slugify(a_t)
if '-' not in a:
if a[-1] == 's':
a_s = p.singular_noun(a)
a_lex = "tv_finsg("+a+", "+str(a_s)+")."
strpos = f.find(a_lex)
if strpos != -1:
return True
else:
return False
def rel_CNL(self, left_subtype, right_subtype, plural_l):
"""To generate sentence for relation"""
title = self.title
title_slug = slugify(title)
    if self.ref.__class__.__name__ == 'Relationtype':
rel = []
if istv_title(self):
st = str(left_subtype)+" "+str(title).lower()+" "+str(right_subtype)+"."
else:
if plural_l == 0:
st = str(left_subtype)+" is "+str(title).lower()+" "+str(right_subtype)+"."
elif plural_l == 1:
st = str(left_subtype)+" are "+str(title).lower()+" "+str(right_subtype)+"."
rel.append(st)
return rel
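# Hedged usage sketch (assumed inputs, illustration only): for a Relationtype
# titled "owns" that the lexicon lists as a transitive verb,
#   rel_CNL(rt, "Alice", "a dog", 0)
# returns ["Alice owns a dog."]; a non-verb title such as "part-of" instead
# yields ["Alice is part-of a dog."].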
def is_reflexive_sentence(self, st):
"""Generates reflexive sentence"""
refl = []
title = self.title
title_slug = slugify(title)
if istv_title(self):
j= "It is a reflexive sentence. "+str(st).title()+" "+str(title)+" "+str(st)+"."
else:
j= "It is a reflexive sentence. "+str(st).title()+" is "+str(title)+" "+str(st)+"."
refl.append(j)
return refl
def is_symmetrical_RT(self, left_subtype, right_subtype, plural_r):
"""Generates CNL Sentence for Relation/RT if symmetrical"""
symm = []
title = self.title
title_slug = slugify(title)
#if (type_lst == 'Gbobject' and type_rst == 'Gbobject'):
if istv_title(self):
g = str(right_subtype).title()+" "+str(title)+" "+str(left_subtype).title()+"."
else:
if plural_r == 0:
g = str(right_subtype).title()+" is "+str(title)+" "+str(left_subtype).title()+"."
else:
g = str(right_subtype).title()+" are "+str(title)+" "+str(left_subtype).title()+"."
symm.append(g)
return symm
def is_asymmetrical_RT(self, left_subtype, right_subtype, plural_r):
"""Generates CNL Sentence for Relation/RT if symmetrical"""
asymm = []
inverse = self.inverse
inverse_slug = slugify(inverse)
if istv_inverse(self):
g = str(right_subtype).title()+" "+str(inverse)+" "+str(left_subtype).title()+"."
else:
if plural_r == 0:
g = str(right_subtype).title()+" is "+str(inverse)+" "+str(left_subtype).title()+"."
elif plural_r == 1:
            g = str(right_subtype).title()+" are "+str(inverse)+" "+str(left_subtype).title()+"."
asymm.append(g)
return asymm
def rel_lex_sentence(self):
"""Generates RT's title & inverse sentence"""
    if self.ref.__class__.__name__ == 'Relationtype':
rlex = []
title=self.title
title_slug=slugify(title)
inverse=self.inverse
inverse_slug=slugify(inverse)
h="A relation_type's title is "+str(title_slug)+"."
rlex.append(h.capitalize())
if (title==inverse):
b="Its title and its inverse are equal."
else:
b="Its inverse is "+str(inverse_slug)+"."
rlex.append(b.capitalize())
return rlex
def get_app_NT(self):
"""Generates CNL Sentences for left & right applicable NT for RT"""
    if self.ref.__class__.__name__ == 'Relationtype':
a = []
l_app = self.left_applicable_nodetypes
r_app=self.right_applicable_nodetypes
e = "Its left_applicable_nodetype is "+str(l_app).upper()+"."
a.append(e)
f = "Its right_applicable_nodetype is "+str(r_app).upper()+"."
a.append(f)
return a
def get_RT_sentence(self):
    #Generates CNL sentences for an RT in detail
    if self.ref.__class__.__name__ == 'Relationtype':
sentence = get_CNL_sentence_RT(self, self.left_subjecttype, self.right_subjecttype, 4)
return sentence
    #Generates CNL sentences for a Relation in detail
    elif self.ref.__class__.__name__ == 'Relation':
sentence = get_CNL_sentence_RT(self.relationtype, self.left_subject, self.right_subject, 4)
return sentence
#Get relations for the Gbobject: singular and plural
def get_rel(self):
sen = []
sentence = []
lr = Relation.objects.filter(left_subject = self.id)
rr = Relation.objects.filter(right_subject = self.id)
if lr:
"""List which stores each right subject"""
lst = get_list_relation(self, 0)
for k,v in lst.iteritems():
rel = Relationtype.objects.filter(title = k)
val = v
for rt in rel:
sen = get_CNL_sentence_RT(rt, self, val, 0)
sentence.extend(sen)
if rr:
"""List which stores each left subject"""
lst = get_list_relation(self, 1)
for k,v in lst.iteritems():
rel = Relationtype.objects.filter(inverse = k)
val = v
for rt in rel:
sen = get_CNL_sentence_RT(rt, val, self,1)
sentence.extend(sen)
return sentence
def get_RT_subjecttype(self, left_subtype, right_subtype):
"""Returns CNL sentence of left & right subject type of RT"""
    if self.ref.__class__.__name__ == 'Relationtype':
st = []
ce = "Its left_subjecttype is "+str(left_subtype)+"."
c = ce.capitalize()
st.append(c)
de = "Its right_subjecttype is "+str(right_subtype)+"."
d = de.capitalize()
st.append(d)
return st
else:
pass
|
agpl-3.0
|
dbbhattacharya/kitsune
|
vendor/packages/logilab-common/optparser.py
|
6
|
3225
|
# -*- coding: utf-8 -*-
# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Extend OptionParser with commands.
Example:
>>> parser = OptionParser()
>>> parser.usage = '%prog COMMAND [options] <arg> ...'
>>> parser.add_command('build', 'mymod.build')
>>> parser.add_command('clean', run_clean, add_opt_clean)
>>> run, options, args = parser.parse_command(sys.argv[1:])
>>> return run(options, args[1:])
Here mymod.build is a module defining two functions, run and add_options.
"""
__docformat__ = "restructuredtext en"
# XXX merge with optik_ext ? merge with clcommands ?
import sys
import optparse
class OptionParser(optparse.OptionParser):
def __init__(self, *args, **kwargs):
optparse.OptionParser.__init__(self, *args, **kwargs)
self._commands = {}
self.min_args, self.max_args = 0, 1
def add_command(self, name, mod_or_funcs, help=''):
"""name of the command
name of module or tuple of functions (run, add_options)
"""
        assert isinstance(mod_or_funcs, (str, tuple)), \
            "mod_or_funcs has to be a module name or a tuple of functions"
self._commands[name] = (mod_or_funcs, help)
def print_main_help(self):
optparse.OptionParser.print_help(self)
print '\ncommands:'
for cmdname, (_, help) in self._commands.items():
print '% 10s - %s' % (cmdname, help)
def parse_command(self, args):
if len(args) == 0:
self.print_main_help()
sys.exit(1)
cmd = args[0]
args = args[1:]
if cmd not in self._commands:
if cmd in ('-h', '--help'):
self.print_main_help()
sys.exit(0)
elif self.version is not None and cmd == "--version":
self.print_version()
sys.exit(0)
self.error('unknown command')
self.prog = '%s %s' % (self.prog, cmd)
mod_or_f, help = self._commands[cmd]
# optparse inserts self.description between usage and options help
self.description = help
if isinstance(mod_or_f, str):
exec 'from %s import run, add_options' % mod_or_f
else:
run, add_options = mod_or_f
add_options(self)
(options, args) = self.parse_args(args)
if not (self.min_args <= len(args) <= self.max_args):
self.error('incorrect number of arguments')
return run, options, args
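# Minimal usage sketch (run_clean/add_opt_clean are assumed helpers, for
# illustration only):
#
#   def add_opt_clean(parser):
#       parser.add_option('--force', action='store_true', default=False)
#
#   def run_clean(options, args):
#       print 'cleaning', args, 'force=%s' % options.force
#
#   parser = OptionParser()
#   parser.add_command('clean', (run_clean, add_opt_clean), help='remove build artifacts')
#   run, options, args = parser.parse_command(sys.argv[1:])
#   run(options, args)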
|
bsd-3-clause
|
agilemobiledev/spiderfoot
|
ext/dns/rdtypes/ANY/NSEC3PARAM.py
|
85
|
3169
|
# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import cStringIO
import struct
import dns.exception
import dns.rdata
class NSEC3PARAM(dns.rdata.Rdata):
"""NSEC3PARAM record
@ivar algorithm: the hash algorithm number
@type algorithm: int
@ivar flags: the flags
@type flags: int
@ivar iterations: the number of iterations
@type iterations: int
@ivar salt: the salt
@type salt: string"""
__slots__ = ['algorithm', 'flags', 'iterations', 'salt']
def __init__(self, rdclass, rdtype, algorithm, flags, iterations, salt):
super(NSEC3PARAM, self).__init__(rdclass, rdtype)
self.algorithm = algorithm
self.flags = flags
self.iterations = iterations
self.salt = salt
def to_text(self, origin=None, relativize=True, **kw):
if self.salt == '':
salt = '-'
else:
salt = self.salt.encode('hex-codec')
return '%u %u %u %s' % (self.algorithm, self.flags, self.iterations, salt)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
algorithm = tok.get_uint8()
flags = tok.get_uint8()
iterations = tok.get_uint16()
salt = tok.get_string()
if salt == '-':
salt = ''
else:
salt = salt.decode('hex-codec')
return cls(rdclass, rdtype, algorithm, flags, iterations, salt)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
l = len(self.salt)
file.write(struct.pack("!BBHB", self.algorithm, self.flags,
self.iterations, l))
file.write(self.salt)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
(algorithm, flags, iterations, slen) = struct.unpack('!BBHB',
wire[current : current + 5])
current += 5
rdlen -= 5
salt = wire[current : current + slen].unwrap()
current += slen
rdlen -= slen
if rdlen != 0:
raise dns.exception.FormError
return cls(rdclass, rdtype, algorithm, flags, iterations, salt)
from_wire = classmethod(from_wire)
def _cmp(self, other):
b1 = cStringIO.StringIO()
self.to_wire(b1)
b2 = cStringIO.StringIO()
other.to_wire(b2)
return cmp(b1.getvalue(), b2.getvalue())
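# Round-trip sketch (values are assumptions, for illustration only): an
# NSEC3PARAM with algorithm 1 (SHA-1), flags 0, 10 iterations and the two-byte
# salt '\xab\xcd' renders via to_text() as "1 0 10 abcd"; to_wire() emits
# struct.pack("!BBHB", 1, 0, 10, 2) followed by the raw salt bytes.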
|
gpl-2.0
|
ilastikdev/ilastik
|
ilastik/applets/objectClassification/opObjectClassification.py
|
1
|
59400
|
###############################################################################
# ilastik: interactive learning and segmentation toolkit
#
# Copyright (C) 2011-2014, the ilastik developers
# <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
# http://ilastik.org/license.html
###############################################################################
import numpy
import numpy.lib.recfunctions as rfn
import vigra
import time
import warnings
import itertools
from collections import defaultdict
from functools import partial
from lazyflow.graph import Operator, InputSlot, OutputSlot
from lazyflow.stype import Opaque
from lazyflow.rtype import List
from lazyflow.operators import OpValueCache, OpSlicedBlockedArrayCache, OperatorWrapper, OpMultiArrayStacker
from lazyflow.request import Request, RequestPool, RequestLock
from lazyflow.classifiers import ParallelVigraRfLazyflowClassifierFactory, ParallelVigraRfLazyflowClassifier
from ilastik.utility import OperatorSubView, MultiLaneOperatorABC, OpMultiLaneWrapper
from ilastik.utility.mode import mode
from ilastik.applets.objectExtraction.opObjectExtraction import default_features_key
from ilastik.applets.objectExtraction.opObjectExtraction import OpObjectExtraction
from ilastik.applets.base.applet import DatasetConstraintError
import logging
logger = logging.getLogger(__name__)
MISSING_VALUE = 0
class OpObjectClassification(Operator, MultiLaneOperatorABC):
"""The top-level operator for object classification.
Most functionality is handled by specialized operators such as
OpObjectTrain and OpObjectPredict.
Also transfers existing labels if the upstream object segmentation
    changes. The transfer is conservative: labels only get transferred
from an old object to a new object if they overlap sufficiently,
and the label does not overlap with other objects.
"""
name = "OpObjectClassification"
category = "Top-level"
###############
# Input slots #
###############
BinaryImages = InputSlot(level=1) # for visualization
RawImages = InputSlot(level=1) # for visualization
SegmentationImages = InputSlot(level=1) #connected components
# the actual feature arrays
# same format as OpObjectExtraction.RegionFeatures
ObjectFeatures = InputSlot(rtype=List, stype=Opaque, level=1)
# the names of the features computed in the object extraction operator
# same format as OpObjectExtraction.ComputedFeatureNames
ComputedFeatureNames = InputSlot(rtype=List, stype=Opaque)
# the features selected in our own GUI
# same format as ComputedFeatureNames
SelectedFeatures = InputSlot(rtype=List, stype=Opaque)
LabelsAllowedFlags = InputSlot(stype='bool', level=1)
AllowDeleteLabels = InputSlot(stype='bool', value=True)
AllowDeleteLastLabelOnly = InputSlot(stype='bool', value=False)
AllowAddLabel = InputSlot(stype='bool', value=True)
SuggestedLabelNames = InputSlot(stype=Opaque, value=[])
LabelInputs = InputSlot(stype=Opaque, rtype=List, optional=True, level=1)
FreezePredictions = InputSlot(stype='bool', value=False)
EnableLabelTransfer = InputSlot(stype='bool', value=False)
# for reading from disk
InputProbabilities = InputSlot(level=1, stype=Opaque, rtype=List, optional=True)
################
# Output slots #
################
NumLabels = OutputSlot()
Classifier = OutputSlot()
LabelImages = OutputSlot(level=1)
Predictions = OutputSlot(level=1, stype=Opaque, rtype=List)
Probabilities = OutputSlot(level=1, stype=Opaque, rtype=List)
# pulls whatever is in the cache, but does not try to compute more.
CachedProbabilities = OutputSlot(level=1, stype=Opaque, rtype=List)
PredictionImages = OutputSlot(level=1) #Labels, by the majority vote
UncachedPredictionImages = OutputSlot(level=1)
PredictionProbabilityChannels = OutputSlot(level=2) # Classification predictions, enumerated by channel
ProbabilityChannelImage = OutputSlot(level=1)
SegmentationImagesOut = OutputSlot(level=1) #input connected components
BadObjects = OutputSlot(level=1, stype=Opaque, rtype=List) #Objects with NaN-like features
BadObjectImages = OutputSlot(level=1) #Images, where objects with NaN-like features are black
Warnings = OutputSlot(stype=Opaque) #Warnings about objects with NaN-like features encountered in training
# Used for labeling
Eraser = OutputSlot()
DeleteLabel = OutputSlot()
# GUI-only (not part of the pipeline, but saved to the project)
LabelNames = OutputSlot()
LabelColors = OutputSlot()
PmapColors = OutputSlot()
def __init__(self, *args, **kwargs):
super(OpObjectClassification, self).__init__(*args, **kwargs)
# internal operators
opkwargs = dict(parent=self)
self.opTrain = OpObjectTrain(parent=self)
self.opPredict = OpMultiLaneWrapper(OpObjectPredict, **opkwargs)
self.opLabelsToImage = OpMultiLaneWrapper(OpRelabelSegmentation, **opkwargs)
self.opPredictionsToImage = OpMultiLaneWrapper(OpRelabelSegmentation, **opkwargs)
self.opPredictionImageCache = OpMultiLaneWrapper(OpSlicedBlockedArrayCache, **opkwargs)
self.opPredictionImageCache.name="OpObjectClassification.opPredictionImageCache"
self.opProbabilityChannelsToImage = OpMultiLaneWrapper(OpMultiRelabelSegmentation, **opkwargs)
self.opBadObjectsToImage = OpMultiLaneWrapper(OpRelabelSegmentation, **opkwargs)
self.opBadObjectsToWarningMessage = OpBadObjectsToWarningMessage(parent=self)
self.classifier_cache = OpValueCache(parent=self)
self.classifier_cache.name = "OpObjectClassification.classifier_cache"
# connect inputs
self.opTrain.Features.connect(self.ObjectFeatures)
self.opTrain.Labels.connect(self.LabelInputs)
self.opTrain.FixClassifier.setValue(False)
self.opTrain.SelectedFeatures.connect(self.SelectedFeatures)
self.classifier_cache.Input.connect(self.opTrain.Classifier)
# Find the highest label in all the label images
self.opMaxLabel = OpMaxLabel( parent=self )
self.opMaxLabel.Inputs.connect( self.LabelInputs )
self.opPredict.Features.connect(self.ObjectFeatures)
self.opPredict.Classifier.connect(self.classifier_cache.Output)
self.opPredict.SelectedFeatures.connect(self.SelectedFeatures)
# Not directly connected. Must always use setValue() to update.
# See _updateNumClasses()
# self.opPredict.LabelsCount.connect(self.opMaxLabel.Output) # See _updateNumClasses()
self.opLabelsToImage.Image.connect(self.SegmentationImages)
self.opLabelsToImage.ObjectMap.connect(self.LabelInputs)
self.opLabelsToImage.Features.connect(self.ObjectFeatures)
self.opPredictionsToImage.Image.connect(self.SegmentationImages)
self.opPredictionsToImage.ObjectMap.connect(self.opPredict.Predictions)
self.opPredictionsToImage.Features.connect(self.ObjectFeatures)
#self.opPredictionImageCache.name = "prediction_image_cache"
self.opPredictionImageCache.fixAtCurrent.connect( self.FreezePredictions )
self.opPredictionImageCache.Input.connect( self.opPredictionsToImage.Output )
self.opProbabilityChannelsToImage.Image.connect(self.SegmentationImages)
self.opProbabilityChannelsToImage.ObjectMaps.connect(self.opPredict.ProbabilityChannels)
self.opProbabilityChannelsToImage.Features.connect(self.ObjectFeatures)
class OpWrappedCache(Operator):
"""
This quick hack is necessary because there's not currently a way to wrap an OperatorWrapper.
We need to double-wrap the cache, so we need this operator to provide the first level of wrapping.
"""
Input = InputSlot(level=1)
innerBlockShape = InputSlot()
outerBlockShape = InputSlot()
fixAtCurrent = InputSlot(value = False)
Output = OutputSlot(level=1)
def __init__(self, *args, **kwargs):
super( OpWrappedCache, self ).__init__( *args, **kwargs )
self._innerOperator = OperatorWrapper( OpSlicedBlockedArrayCache, parent=self )
self._innerOperator.Input.connect( self.Input )
self._innerOperator.fixAtCurrent.connect( self.fixAtCurrent )
self._innerOperator.innerBlockShape.connect( self.innerBlockShape )
self._innerOperator.outerBlockShape.connect( self.outerBlockShape )
self.Output.connect( self._innerOperator.Output )
def execute(self, slot, subindex, roi, destination):
assert False, "Shouldn't get here."
def propagateDirty(self, slot, subindex, roi):
pass # Nothing to do...
        # Wrap the cache for probability channels twice.
self.opProbChannelsImageCache = OpMultiLaneWrapper( OpWrappedCache, parent=self )
self.opProbChannelsImageCache.name = "OpObjectClassification.opProbChannelsImageCache"
self.opProbChannelsImageCache.Input.connect(self.opProbabilityChannelsToImage.Output)
self.opProbChannelsImageCache.fixAtCurrent.connect( self.FreezePredictions )
self.opBadObjectsToImage.Image.connect(self.SegmentationImages)
self.opBadObjectsToImage.ObjectMap.connect(self.opPredict.BadObjects)
self.opBadObjectsToImage.Features.connect(self.ObjectFeatures)
self.opBadObjectsToWarningMessage.BadObjects.connect(self.opTrain.BadObjects)
self.opPredict.InputProbabilities.connect(self.InputProbabilities)
def _updateNumClasses(*args):
"""
When the number of labels changes, we MUST make sure that the prediction image changes its shape (the number of channels).
Since setupOutputs is not called for mere dirty notifications, but is called in response to setValue(),
we use this function to call setValue().
"""
numClasses = len(self.LabelNames.value)
self.opPredict.LabelsCount.setValue( numClasses )
self.opTrain.LabelsCount.setValue( numClasses )
self.NumLabels.setValue( numClasses )
self.LabelNames.notifyDirty( _updateNumClasses )
self.LabelNames.setValue( [] )
self.LabelColors.setValue( [] )
self.PmapColors.setValue( [] )
self.opStackProbabilities = OperatorWrapper( OpMultiArrayStacker, parent=self )
self.opStackProbabilities.Images.connect( self.opProbChannelsImageCache.Output )
self.opStackProbabilities.AxisFlag.setValue('c')
# connect outputs
self.LabelImages.connect(self.opLabelsToImage.Output)
self.Predictions.connect(self.opPredict.Predictions)
self.Probabilities.connect(self.opPredict.Probabilities)
self.CachedProbabilities.connect(self.opPredict.CachedProbabilities)
self.PredictionImages.connect(self.opPredictionImageCache.Output)
self.UncachedPredictionImages.connect(self.opPredictionsToImage.Output)
self.PredictionProbabilityChannels.connect(self.opProbChannelsImageCache.Output)
self.ProbabilityChannelImage.connect( self.opStackProbabilities.Output )
self.BadObjects.connect(self.opPredict.BadObjects)
self.BadObjectImages.connect(self.opBadObjectsToImage.Output)
self.Warnings.connect(self.opBadObjectsToWarningMessage.WarningMessage)
self.Classifier.connect(self.classifier_cache.Output)
self.SegmentationImagesOut.connect(self.SegmentationImages)
# Not directly connected. Must always use setValue() to update.
# See _updateNumClasses()
# self.NumLabels.connect( self.opMaxLabel.Output )
self.Eraser.setValue(100)
self.DeleteLabel.setValue(-1)
self._labelBBoxes = []
self._ambiguousLabels = []
self._needLabelTransfer = False
def handleNewInputImage(multislot, index, *args):
def handleInputReady(slot):
self.setupCaches(multislot.index(slot))
multislot[index].notifyReady(handleInputReady)
self.SegmentationImages.notifyInserted(handleNewInputImage)
self._predict_enabled = False
def setupCaches(self, imageIndex):
"""Setup the label input and caches to correct dimensions"""
numImages=len(self.SegmentationImages)
cctype = self.SegmentationImages[imageIndex].meta.dtype
if not issubclass(cctype, numpy.integer):
msg = "Connected Components image should be of integer type.\n"\
"Ask your workflow developer to change the input applet accordingly.\n"
raise DatasetConstraintError("Object Classification", msg)
self.LabelInputs.resize(numImages)
self.LabelInputs[imageIndex].meta.shape = (1,)
self.LabelInputs[imageIndex].meta.dtype = object
self.LabelInputs[imageIndex].meta.mapping_dtype = numpy.uint8
self.LabelInputs[imageIndex].meta.axistags = None
self._resetLabelInputs(imageIndex)
def _resetLabelInputs(self, imageIndex, roi=None):
labels = dict()
for t in range(self.SegmentationImages[imageIndex].meta.shape[0]):
#initialize, because volumina needs to reshape to use it as a datasink
labels[t] = numpy.zeros((2,))
self.LabelInputs[imageIndex].setValue(labels)
if imageIndex in range(len(self._ambiguousLabels)):
self._ambiguousLabels[imageIndex] = None
self._labelBBoxes[imageIndex] = dict()
else:
self._ambiguousLabels.insert(imageIndex, None)
self._labelBBoxes.insert(imageIndex, dict())
def removeLabel(self, label):
#remove this label from the inputs
for islot, label_slot in enumerate(self.LabelInputs):
if not label_slot.ready() or islot>= len(self.RawImages) or \
not self.RawImages[islot].ready():
continue
cur_labels = label_slot.value
nTimes = self.RawImages[islot].meta.shape[0]
nLabels = len(self.LabelNames.value)+1 #+1 because we already took out the name in labelingGui
for t in range(nTimes):
label_values = cur_labels[t]
label_values[label_values==label+1] = 0
for nextLabel in range(label, nLabels):
label_values[label_values==nextLabel+1]=nextLabel
self.LabelInputs.setDirty([])
def setupOutputs(self):
self.Warnings.meta.shape = (1,)
axisOrder = [ tag.key for tag in self.RawImages[0].meta.axistags ]
blockDimsX = { 't' : (1,1),
'z' : (128,256),
'y' : (128,256),
'x' : (1,1),
'c' : (100, 100) }
blockDimsY = { 't' : (1,1),
'z' : (128,256),
'y' : (1,1),
'x' : (128,256),
'c' : (100,100) }
blockDimsZ = { 't' : (1,1),
'z' : (1,1),
'y' : (128,256),
'x' : (128,256),
'c' : (100,100) }
innerBlockShapeX = tuple( blockDimsX[k][0] for k in axisOrder )
outerBlockShapeX = tuple( blockDimsX[k][1] for k in axisOrder )
innerBlockShapeY = tuple( blockDimsY[k][0] for k in axisOrder )
outerBlockShapeY = tuple( blockDimsY[k][1] for k in axisOrder )
innerBlockShapeZ = tuple( blockDimsZ[k][0] for k in axisOrder )
outerBlockShapeZ = tuple( blockDimsZ[k][1] for k in axisOrder )
self.opPredictionImageCache.innerBlockShape.setValue( (innerBlockShapeX, innerBlockShapeY, innerBlockShapeZ) )
self.opPredictionImageCache.outerBlockShape.setValue( (outerBlockShapeX, outerBlockShapeY, outerBlockShapeZ) )
self.opProbChannelsImageCache.innerBlockShape.setValue( (innerBlockShapeX, innerBlockShapeY, innerBlockShapeZ) )
self.opProbChannelsImageCache.outerBlockShape.setValue( (outerBlockShapeX, outerBlockShapeY, outerBlockShapeZ) )
def setInSlot(self, slot, subindex, roi, value):
pass
def propagateDirty(self, slot, subindex, roi):
if slot==self.SegmentationImages and len(self.LabelInputs)>0:
self._ambiguousLabels[subindex[0]] = self.LabelInputs[subindex[0]].value
self._needLabelTransfer = True
def assignObjectLabel(self, imageIndex, coordinate, assignedLabel):
"""
Update the assigned label of the object located at the given coordinate.
Does nothing if no object resides at the given coordinate.
"""
segmentationShape = self.SegmentationImagesOut[imageIndex].meta.shape
        assert len(coordinate) == len( segmentationShape ), "Coordinate: {} has the wrong length for this image, which is of shape: {}".format( coordinate, segmentationShape )
slicing = tuple(slice(i, i+1) for i in coordinate)
arr = self.SegmentationImagesOut[imageIndex][slicing].wait()
objIndex = arr.flat[0]
if objIndex == 0: # background; FIXME: do not hardcode
return
timeCoord = coordinate[0]
labelslot = self.LabelInputs[imageIndex]
labelsdict = labelslot.value
labels = labelsdict[timeCoord]
nobjects = len(labels)
if objIndex >= nobjects:
newLabels = numpy.zeros((objIndex + 1),)
newLabels[:nobjects] = labels[:]
labels = newLabels
labels[objIndex] = assignedLabel
labelsdict[timeCoord] = labels
labelslot.setValue(labelsdict)
labelslot.setDirty([(timeCoord, objIndex)])
#Fill the cache of label bounding boxes, if it was empty
if len(self._labelBBoxes[imageIndex].keys())==0:
#it's the first label for this image
feats = self.ObjectFeatures[imageIndex]([timeCoord]).wait()
#the bboxes should be the same for all channels
mins = feats[timeCoord][default_features_key]["Coord<Minimum>"]
maxs = feats[timeCoord][default_features_key]["Coord<Maximum>"]
bboxes = dict()
bboxes["Coord<Minimum>"] = mins
bboxes["Coord<Maximum>"] = maxs
self._labelBBoxes[imageIndex][timeCoord]=bboxes
def triggerTransferLabels(self, imageIndex):
if not self._needLabelTransfer:
return None
if not self.SegmentationImages[imageIndex].ready():
return None
if len(self._labelBBoxes[imageIndex].keys())==0:
#we either don't have any labels or we just read the project from file
#nothing to transfer
self._needLabelTransfer = False
return None
if not self.EnableLabelTransfer:
self._resetLabelInputs(imageIndex)
self._needLabelTransfer = False
return None
labels = dict()
for timeCoord in range(self.SegmentationImages[imageIndex].meta.shape[0]):
#we have to get new object features to get bounding boxes
logger.info("Transferring labels to the new segmentation. This might take a while...")
new_feats = self.ObjectFeatures[imageIndex]([timeCoord]).wait()
coords = dict()
coords["Coord<Minimum>"] = new_feats[timeCoord][default_features_key]["Coord<Minimum>"]
coords["Coord<Maximum>"] = new_feats[timeCoord][default_features_key]["Coord<Maximum>"]
#FIXME: pass axistags
new_labels, old_labels_lost, new_labels_lost = self.transferLabels(
self._ambiguousLabels[imageIndex][timeCoord],
self._labelBBoxes[imageIndex][timeCoord],
coords
)
labels[timeCoord] = new_labels
self._labelBBoxes[imageIndex][timeCoord]=coords
self._ambiguousLabels[imageIndex][timeCoord]=numpy.zeros((2,)) #initialize ambig. labels as normal labels
self.LabelInputs[imageIndex].setValue(labels)
self._needLabelTransfer = False
return new_labels, old_labels_lost, new_labels_lost
@staticmethod
def transferLabels(old_labels, old_bboxes, new_bboxes, axistags = None):
#transfer labels from old segmentation to new segmentation
mins_old = old_bboxes["Coord<Minimum>"]
maxs_old = old_bboxes["Coord<Maximum>"]
mins_new = new_bboxes["Coord<Minimum>"]
maxs_new = new_bboxes["Coord<Maximum>"]
nobj_old = mins_old.shape[0]
nobj_new = mins_new.shape[0]
if axistags is None:
axistags = "xyz"
data2D = False
if mins_old.shape[1]==2:
data2D = True
class bbox():
def __init__(self, minmaxs, axistags):
self.xmin = minmaxs[0][axistags.index('x')]
self.ymin = minmaxs[0][axistags.index('y')]
if not data2D:
self.zmin = minmaxs[0][axistags.index('z')]
else:
self.zmin = 0
self.xmax = minmaxs[1][axistags.index('x')]
self.ymax = minmaxs[1][axistags.index('y')]
if not data2D:
self.zmax = minmaxs[1][axistags.index('z')]
else:
self.zmax = 0
self.rad_x = 0.5*(self.xmax - self.xmin)
self.cent_x = self.xmin+self.rad_x
self.rad_y = 0.5*(self.ymax-self.ymin)
self.cent_y = self.ymin+self.rad_y
self.rad_z = 0.5*(self.zmax-self.zmin)
self.cent_z = self.zmin+self.rad_z
@staticmethod
def overlap(bbox_tuple):
this = bbox_tuple[0]
that = bbox_tuple[1]
over_x = this.rad_x+that.rad_x - (abs(this.cent_x-that.cent_x))
over_y = this.rad_y+that.rad_y - (abs(this.cent_y-that.cent_y))
over_z = this.rad_z+that.rad_z - (abs(this.cent_z-that.cent_z))
if not data2D:
if over_x>0 and over_y>0 and over_z>0:
return over_x*over_y*over_z
else:
if over_x>0 and over_y>0:
return over_x*over_y
return 0
nonzeros = numpy.nonzero(old_labels)[0]
bboxes_old = [bbox(x, axistags) for x in zip(mins_old[nonzeros], maxs_old[nonzeros])]
bboxes_new = [bbox(x, axistags) for x in zip(mins_new, maxs_new)]
#remove background
#FIXME: assuming background is 0 again
bboxes_new = bboxes_new[1:]
double_for_loop = itertools.product(bboxes_old, bboxes_new)
overlaps = map(bbox.overlap, double_for_loop)
overlaps = numpy.asarray(overlaps)
overlaps = overlaps.reshape((len(bboxes_old), len(bboxes_new)))
new_labels = numpy.zeros((nobj_new,), dtype=numpy.uint32)
old_labels_lost = dict()
old_labels_lost["full"]=[]
old_labels_lost["partial"]=[]
new_labels_lost = dict()
new_labels_lost["conflict"]=[]
for iobj in range(overlaps.shape[0]):
#take the object with maximum overlap
overlapsum = numpy.sum(overlaps[iobj, :])
if overlapsum==0:
old_labels_lost["full"].append((bboxes_old[iobj].cent_x, bboxes_old[iobj].cent_y, bboxes_old[iobj].cent_z))
continue
newindex = numpy.argmax(overlaps[iobj, :])
if overlapsum-overlaps[iobj,newindex]>0:
#this object overlaps with more than one new object
old_labels_lost["partial"].append((bboxes_old[iobj].cent_x, bboxes_old[iobj].cent_y, bboxes_old[iobj].cent_z))
overlaps[iobj, :] = 0
overlaps[iobj, newindex] = 1 #doesn't matter what number>0
for iobj in range(overlaps.shape[1]):
labels = numpy.where(overlaps[:, iobj]>0)[0]
if labels.shape[0]==1:
new_labels[iobj+1]=old_labels[nonzeros[labels[0]]] #iobj+1 because of the background
elif labels.shape[0]>1:
new_labels_lost["conflict"].append((bboxes_new[iobj].cent_x, bboxes_new[iobj].cent_y, bboxes_new[iobj].cent_z))
        new_labels[0]=0 #FIXME: hardcoded background value again
return new_labels, old_labels_lost, new_labels_lost
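    # Hedged sketch of the overlap test used above (standalone numpy,
    # assumed toy values, illustration only):
    #
    #   rad_a, cent_a = 5.0, 10.0   # old bbox half-width and center along x
    #   rad_b, cent_b = 5.0, 12.0   # new bbox half-width and center along x
    #   over_x = rad_a + rad_b - abs(cent_a - cent_b)   # 8.0 > 0, so overlap
    #
    # A label is carried over only when exactly one old box overlaps a new
    # box; multi-overlap cases are reported in old_labels_lost/new_labels_lost.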
def exportLabelInfo(self, file_path):
"""
For all images with labels, export object bounding boxes and label classes as JSON.
"""
import json
import collections
from functools import partial
logger.info("Exporting label information as json to: {}".format( file_path ))
json_data_all_lanes = collections.OrderedDict()
for lane_index, (label_slot, object_feature_slot) in enumerate(zip(self.LabelInputs, self.ObjectFeatures)):
logger.info("Processing image #{}".format( lane_index ))
json_data_this_lane = collections.OrderedDict()
labels_timewise = label_slot.value
for t in sorted(labels_timewise.keys()):
labels = labels_timewise[t]
if not any(labels):
continue
object_features_timewise = object_feature_slot([t]).wait()
object_features = object_features_timewise[t]
min_coords = object_features["Default features"]["Coord<Minimum>"]
max_coords = object_features["Default features"]["Coord<Maximum>"]
# Don't bother saving info for objects that aren't labeled
min_coords = min_coords[labels.nonzero()]
max_coords = max_coords[labels.nonzero()]
labels = labels[labels.nonzero()]
json_data_this_time = collections.OrderedDict()
bounding_boxes = collections.OrderedDict()
# Convert from numpy array to list (for json)
bounding_boxes["Coord<Minimum>"] = map( partial(map, int), min_coords )
bounding_boxes["Coord<Maximum>"] = map( partial(map, int), max_coords )
json_data_this_time["bounding_boxes"] = bounding_boxes
json_data_this_time["labels"] = map(int, labels)
json_data_this_lane[int(t)] = json_data_this_time
json_data_all_lanes[lane_index] = json_data_this_lane
with open(file_path, 'w') as f:
json.dump(json_data_all_lanes, f)
logger.info("Label export FINISHED.")
def importLabelInfo(self, file_path):
"""
Read labels and bounding boxes from a JSON file.
For all image lanes in the JSON file, replace all labels in that image.
For image lanes NOT listed in the JSON file, keep the existing labels.
"""
import json
logger.info("Reading label information from json: {}".format( file_path ))
with open(file_path, 'r') as f:
json_data_all_lanes = json.load(f)
max_label = 0
new_labels_all_lanes = {}
for lane_index_str in sorted(json_data_all_lanes.keys(), key=int):
lane_index = int(lane_index_str)
logger.info("Processing image #{}".format( lane_index ))
json_data_this_lane = json_data_all_lanes[lane_index_str]
new_labels_this_lane = {}
for time_str in sorted(json_data_this_lane.keys(), key=int):
time = int(time_str)
old_features_timewise = self.ObjectFeatures[lane_index]([time]).wait()
old_features = old_features_timewise[time]
current_bboxes = {}
current_bboxes["Coord<Minimum>"] = old_features["Default features"]["Coord<Minimum>"]
current_bboxes["Coord<Maximum>"] = old_features["Default features"]["Coord<Maximum>"]
json_data_this_time = json_data_this_lane[time_str]
saved_labels = numpy.array( json_data_this_time["labels"] )
max_label = max( max_label, saved_labels.max() )
saved_bboxes = {}
saved_bboxes["Coord<Minimum>"] = numpy.array( json_data_this_time["bounding_boxes"]["Coord<Minimum>"] )
saved_bboxes["Coord<Maximum>"] = numpy.array( json_data_this_time["bounding_boxes"]["Coord<Maximum>"] )
# Calculate new labels
newlabels, oldlost, newlost = OpObjectClassification.transferLabels(saved_labels, saved_bboxes, current_bboxes, None)
new_labels_this_lane[time] = newlabels
logger.info("Lane {}, time {} new labels: {}".format( lane_index, time, list(newlabels) ))
logger.info("Lane {}, time {} lost OLD: {}".format( lane_index, time, oldlost ))
logger.info("Lane {}, time {} lost NEW: {}".format( lane_index, time, newlost ))
# Apply new labels
new_labels_all_lanes[lane_index] = new_labels_this_lane
# If we have a new max label, add label classes as needed.
label_names = self.LabelNames.value
if len(self.LabelNames.value) < max_label:
new_label_names = list(label_names)
for class_index in range( len(label_names)+1, int(max_label)+1 ):
new_label_names.append( "Label {}".format( class_index ) )
self.LabelNames.setValue( new_label_names )
for lane_index, new_labels_timewise in sorted(new_labels_all_lanes.items()):
for t in range( self.SegmentationImages[lane_index].meta.getTaggedShape()['t'] ):
if t not in new_labels_timewise:
# No replacement labels. Copy old labels.
new_labels_timewise[t] = self.LabelInputs[lane_index].value[t]
logger.info("Applying new labels to lane {}".format( lane_index ))
self.LabelInputs[lane_index].setValue(new_labels_timewise)
logger.info("Label import FINISHED")
def createExportTable(self, lane, roi):
numLanes = len(self.SegmentationImages)
assert lane < numLanes, \
"Can't export features for lane {} (only {} lanes exist)"\
.format( lane, numLanes )
return self.opPredict[lane].createExportTable(roi)
def addLane(self, laneIndex):
numLanes = len(self.SegmentationImages)
assert numLanes == laneIndex, "Image lanes must be appended."
for slot in self.inputs.values():
if slot.level > 0 and len(slot) == laneIndex:
slot.resize(numLanes + 1)
def removeLane(self, laneIndex, finalLength):
for slot in self.inputs.values():
if slot.level > 0 and len(slot) == finalLength + 1:
slot.removeSlot(laneIndex, finalLength)
try:
self._ambiguousLabels.pop(laneIndex)
self._labelBBoxes.pop(laneIndex)
except:
#FIXME: sometimes this pop is called for no reason and makes the project unusable. We should fix the underlying issue.
pass
def getLane(self, laneIndex):
return OperatorSubView(self, laneIndex)
def _atleast_nd(a, ndim):
"""Like numpy.atleast_1d and friends, but supports arbitrary ndim,
always puts extra dimensions last, and resizes.
"""
if ndim < a.ndim:
return
nnew = ndim - a.ndim
newshape = tuple(list(a.shape) + [1] * nnew)
a.resize(newshape)
def _concatenate(arrays, axis):
"""wrapper to numpy.concatenate that resizes arrays first."""
arrays = list(a for a in arrays if 0 not in a.shape)
if len(arrays) == 0:
return numpy.array([])
maxd = max(max(a.ndim for a in arrays), 2)
for a in arrays:
_atleast_nd(a, maxd)
return numpy.concatenate(arrays, axis=axis)
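# Illustrative sketch (assumed arrays): _concatenate pads ndim (in place, via
# ndarray.resize) before joining, so shapes that plain numpy.concatenate
# rejects become compatible:
#
#   a = numpy.arange(3)            # shape (3,)  -> resized to (3, 1)
#   b = numpy.ones((3, 2))
#   _concatenate([a, b], axis=1)   # -> shape (3, 3)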
def make_feature_array(feats, selected, labels=None):
featlist = []
labellist = []
row_names = []
col_names = []
for t in sorted(feats.keys()):
featsMatrix_tmp = []
index = None
if labels is not None:
labellist_tmp = []
lab = labels[t].squeeze()
index = numpy.nonzero(lab)
labellist_tmp.append(lab[index])
timestep_col_names = []
for plugin in sorted(feats[t].keys()):
if plugin == default_features_key or plugin not in selected:
continue
for featname in sorted(feats[t][plugin].keys()):
if featname not in selected[plugin]:
continue
value = feats[t][plugin][featname]
ft = numpy.asarray(value.squeeze())
if index is not None:
ft = ft[index]
featsMatrix_tmp.append(ft)
timestep_col_names.extend([(plugin, featname)] * value.shape[1])
if not col_names:
col_names = timestep_col_names
elif col_names != timestep_col_names:
            raise Exception('different time slices did not have the same features.')
#FIXME: we can do it all with just arrays
featsMatrix_tmp_combined = _concatenate(featsMatrix_tmp, axis=1)
featlist.append(featsMatrix_tmp_combined)
if index is not None:
row_names.extend(list((t, obj) for obj in index[0]))
if labels is not None:
labellist_tmp_combined = _concatenate(labellist_tmp, axis=1)
labellist.append(labellist_tmp_combined)
featMatrix = _concatenate(featlist, axis=0)
if labels is not None:
labelsMatrix = _concatenate(labellist, axis=0)
assert labelsMatrix.shape[0] == featMatrix.shape[0]
return featMatrix, row_names, col_names, labelsMatrix
return featMatrix, row_names, col_names
def replace_missing(a):
rows, cols = numpy.where(numpy.isnan(a) + numpy.isinf(a))
idx = (rows, cols)
rows = list(set(rows.flat))
cols = list(set(cols.flat))
a[idx] = MISSING_VALUE
return rows, cols
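# Hedged example (assumed toy array, illustration only): replace_missing()
# patches NaN/inf cells in place and reports their coordinates:
#
#   a = numpy.array([[1.0, numpy.nan], [numpy.inf, 4.0]])
#   rows, cols = replace_missing(a)
#   # a is now [[1., 0.], [0., 4.]] (MISSING_VALUE == 0);
#   # rows and cols each contain the indices 0 and 1, in no guaranteed order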
class OpObjectTrain(Operator):
"""Trains a random forest on all labeled objects."""
name = "TrainRandomForestObjects"
description = "Train a random forest on multiple images"
category = "Learning"
Labels = InputSlot(level=1, stype=Opaque, rtype=List)
LabelsCount = InputSlot(stype='int')
Features = InputSlot(level=1, rtype=List, stype=Opaque)
SelectedFeatures = InputSlot(rtype=List, stype=Opaque)
FixClassifier = InputSlot(stype="bool")
ForestCount = InputSlot(stype="int", value=1)
Classifier = OutputSlot()
BadObjects = OutputSlot(stype=Opaque)
def __init__(self, *args, **kwargs):
super(OpObjectTrain, self).__init__(*args, **kwargs)
self._tree_count = 100
self.FixClassifier.setValue(False)
def setupOutputs(self):
if self.FixClassifier.value == False:
self.Classifier.meta.dtype = object
self.Classifier.meta.shape = (1,)
self.Classifier.meta.axistags = None
self.BadObjects.meta.shape = (1,)
self.BadObjects.meta.dtype = object
self.BadObjects.meta.axistags = None
def execute(self, slot, subindex, roi, result):
featList = []
all_col_names = []
labelsList = []
# get the number of ALL labels
numLabels=0
if self.LabelsCount.ready():
numLabels = self.LabelsCount[:].wait()
numLabels = int(numLabels[0])
# will be available at slot self.Warnings
all_bad_objects = defaultdict(lambda: defaultdict(list))
all_bad_feats = set()
selected = self.SelectedFeatures([]).wait()
if len(selected)==0:
# no features - no predictions
self.Classifier.setValue(None)
return
for i in range(len(self.Labels)):
# this loop is by image, not time!
# TODO: we should be able to use self.Labels[i].value,
# but the current implementation of Slot.value() does not
# do the right thing.
labels_image = self.Labels[i]([]).wait()
labels_image_filtered = {}
nztimes = []
for timestep, labels_time in labels_image.iteritems():
nz = numpy.nonzero(labels_time)
if len(nz[0])==0:
continue
else:
nztimes.append(timestep)
labels_image_filtered[timestep] = labels_time
if len(nztimes)==0:
continue
# compute the features if there are nonzero labels in this image
# and only for the time steps, which have labels
feats = self.Features[i](nztimes).wait()
featstmp, row_names, col_names, labelstmp = make_feature_array(feats, selected, labels_image_filtered)
if labelstmp.size == 0 or featstmp.size == 0:
continue
rows, cols = replace_missing(featstmp)
featList.append(featstmp)
all_col_names.append(tuple(col_names))
labelsList.append(labelstmp)
for idx in rows:
t, obj = row_names[idx]
all_bad_objects[i][t].append(obj)
for c in cols:
all_bad_feats.add(col_names[c])
if len(labelsList)==0:
#no labels, return here
self.Classifier.setValue(None)
return
self._warnBadObjects(all_bad_objects, all_bad_feats)
        if len(set(all_col_names)) != 1:
            raise Exception('different time slices did not have the same features.')
featMatrix = _concatenate(featList, axis=0)
labelsMatrix = _concatenate(labelsList, axis=0)
logger.info("training on matrix of shape {}".format(featMatrix.shape))
if featMatrix.size == 0 or labelsMatrix.size == 0:
result[:] = None
return
allLabels=map(long, range(1,numLabels+1))
classifier_factory = ParallelVigraRfLazyflowClassifierFactory( self._tree_count, self.ForestCount.value, labels=allLabels )
classifier = classifier_factory.create_and_train( featMatrix.astype(numpy.float32), numpy.asarray(labelsMatrix, dtype=numpy.uint32) )
avg_oob = numpy.mean(classifier.oobs)
logger.info("training finished, average out-of-bag error: {}".format(avg_oob))
result[0] = classifier
return result
def propagateDirty(self, slot, subindex, roi):
if slot is not self.FixClassifier and \
self.inputs["FixClassifier"].value == False:
slcs = (slice(0, self.ForestCount.value, None),)
self.outputs["Classifier"].setDirty(slcs)
def _warnBadObjects(self, bad_objects, bad_feats):
if len(bad_feats) > 0 or\
any([len(bad_objects[i]) > 0 for i in bad_objects.keys()]):
self.BadObjects.setValue({'objects': bad_objects,
'feats': bad_feats})
class OpObjectPredict(Operator):
"""Predicts object labels in a single image.
Performs prediction on all objects in a time slice at once, and
caches the result.
"""
# WARNING: right now we predict and cache a whole time slice. We
# expect this to be fast because there are relatively few objects
# compared to the number of pixels in pixel classification. If
# this should be too slow, we should instead cache at the object
# level, and only predict for objects visible in the roi.
name = "OpObjectPredict"
Features = InputSlot(rtype=List, stype=Opaque)
SelectedFeatures = InputSlot(rtype=List, stype=Opaque)
Classifier = InputSlot()
LabelsCount = InputSlot(stype='integer')
InputProbabilities = InputSlot(stype=Opaque, rtype=List, optional=True)
Predictions = OutputSlot(stype=Opaque, rtype=List)
Probabilities = OutputSlot(stype=Opaque, rtype=List)
CachedProbabilities = OutputSlot(stype=Opaque, rtype=List)
ProbabilityChannels = OutputSlot(stype=Opaque, rtype=List, level=1)
BadObjects = OutputSlot(stype=Opaque, rtype=List)
#SegmentationThreshold = 0.5
def setupOutputs(self):
self.Predictions.meta.shape = self.Features.meta.shape
self.Predictions.meta.dtype = object
self.Predictions.meta.axistags = None
self.Predictions.meta.mapping_dtype = numpy.uint8
self.Probabilities.meta.shape = self.Features.meta.shape
self.Probabilities.meta.dtype = object
self.Probabilities.meta.mapping_dtype = numpy.float32
self.Probabilities.meta.axistags = None
self.BadObjects.meta.shape = self.Features.meta.shape
self.BadObjects.meta.dtype = object
self.BadObjects.meta.mapping_dtype = numpy.uint8
self.BadObjects.meta.axistags = None
if self.LabelsCount.ready():
nlabels = self.LabelsCount[:].wait()
nlabels = int(nlabels[0])
self.ProbabilityChannels.resize(nlabels)
for oslot in self.ProbabilityChannels:
oslot.meta.shape = self.Features.meta.shape
oslot.meta.dtype = object
oslot.meta.axistags = None
oslot.meta.mapping_dtype = numpy.float32
self.lock = RequestLock()
self.prob_cache = dict()
self.bad_objects = dict()
def execute(self, slot, subindex, roi, result):
assert slot in [self.Predictions,
self.Probabilities,
self.CachedProbabilities,
self.ProbabilityChannels,
self.BadObjects]
times = roi._l
if len(times) == 0:
# we assume that 0-length requests are requesting everything
times = range(self.Predictions.meta.shape[0])
if slot is self.CachedProbabilities:
return {t: self.prob_cache[t] for t in times if t in self.prob_cache}
classifier = self.Classifier.value
if classifier is None:
# this happens if there was no data to train with
return dict((t, numpy.array([])) for t in times)
feats = {}
prob_predictions = {}
selected = self.SelectedFeatures([]).wait()
# FIXME: self.prob_cache is shared, so we need to block.
# However, this makes prediction single-threaded.
with self.lock:
for t in times:
if t in self.prob_cache:
continue
tmpfeats = self.Features([t]).wait()
ftmatrix, _, col_names = make_feature_array(tmpfeats, selected)
rows, cols = replace_missing(ftmatrix)
self.bad_objects[t] = numpy.zeros((ftmatrix.shape[0],))
self.bad_objects[t][rows] = 1
feats[t] = ftmatrix
prob_predictions[t] = 0
def predict_forest(_t):
# Note: We can't use RandomForest.predictLabels() here because we're training in parallel,
# and we have to average the PROBABILITIES from all forests.
# Averaging the label predictions from each forest is NOT equivalent.
# For details please see wikipedia:
# http://en.wikipedia.org/wiki/Electoral_College_%28United_States%29#Irrelevancy_of_national_popular_vote
# (^-^)
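            # Tiny numeric illustration (assumed values): two forests voting
            # [0.6, 0.4] and [0.4, 0.6] average to [0.5, 0.5]; averaging the
            # hard argmax labels instead would break the tie arbitrarily.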
prob_predictions[_t] = classifier.predict_probabilities(feats[_t].astype(numpy.float32))
# predict the data with all the forests in parallel
pool = RequestPool()
for t in times:
if t in self.prob_cache:
continue
req = Request( partial(predict_forest, t) )
pool.add(req)
pool.wait()
pool.clean()
for t in times:
if t not in self.prob_cache:
# prob_predictions is a dict-of-arrays, indexed as follows:
# prob_predictions[t][object_index, class_index]
self.prob_cache[t] = prob_predictions[t]
self.prob_cache[t][0] = 0 # Background probability is always zero
if slot == self.Probabilities:
return { t : self.prob_cache[t] for t in times }
elif slot == self.Predictions:
# FIXME: Support SegmentationThreshold again...
labels = dict()
for t in times:
prob_sum = numpy.sum(self.prob_cache[t], axis=1)
labels[t] = 1 + numpy.argmax(self.prob_cache[t], axis=1)
labels[t][0] = 0 # Background gets the zero label
return labels
elif slot == self.ProbabilityChannels:
try:
prob_single_channel = {t: self.prob_cache[t][:, subindex[0]]
for t in times}
except:
# no probabilities available for this class; return zeros
prob_single_channel = {t: numpy.zeros((self.prob_cache[t].shape[0], 1))
for t in times}
return prob_single_channel
elif slot == self.BadObjects:
return { t : self.bad_objects[t] for t in times }
else:
assert False, "Unknown input slot"
def propagateDirty(self, slot, subindex, roi):
self.prob_cache = {}
if slot is self.InputProbabilities:
self.prob_cache = self.InputProbabilities([]).wait()
self.Predictions.setDirty(())
self.Probabilities.setDirty(())
self.ProbabilityChannels.setDirty(())
def createExportTable(self, roi):
if not self.Predictions.ready() or not self.Features.ready():
return None
features = self.Features(roi).wait()
feature_table = OpObjectExtraction.createExportTable(features)
predictions = self.Predictions(roi).wait()
probs = self.Probabilities(roi).wait()
nobjs = []
for t, preds in predictions.iteritems():
nobjs.append(preds.shape[0])
nobjs_total = sum(nobjs)
if nobjs_total==0:
logger.info("Prediction not run yet, won't be exported")
return feature_table
else:
assert nobjs_total==feature_table.shape[0]
def fill_column(slot_value, column, name, channel=None):
start = 0
finish = start
for t, values in slot_value.iteritems():
#FIXME: remove the first object, it's always background
finish = start + nobjs[t]
if channel is None:
column[name][start:finish] = values[:]
else:
column[name][start:finish] = values[:, channel]
start = finish
pred_column = numpy.zeros(nobjs_total, {'names': ['Prediction'], 'formats': [numpy.dtype(numpy.uint8)]})
fill_column(predictions, pred_column, "Prediction")
joint_table = rfn.merge_arrays((feature_table, pred_column), flatten = True, usemask = False)
nchannels = probs[0].shape[-1]
columns = [feature_table, pred_column]
for ich in range(nchannels):
prob_column = numpy.zeros(nobjs_total, {'names': ['Probability of class %d'%ich], \
'formats': [numpy.dtype(numpy.float32)]})
fill_column(probs, prob_column, 'Probability of class %d'%ich, ich)
columns.append(prob_column)
joint_table = rfn.merge_arrays(columns, flatten = True, usemask = False)
return joint_table
class OpRelabelSegmentation(Operator):
"""Takes a segmentation image and a mapping and returns the
mapped image.
For instance, map prediction labels onto objects.
"""
name = "OpToImage"
Image = InputSlot()
ObjectMap = InputSlot(stype=Opaque, rtype=List)
Features = InputSlot(rtype=List, stype=Opaque) #this is needed to limit dirty propagation to the object bbox
Output = OutputSlot()
loggingName = __name__ + ".OpRelabelSegmentation"
logger = logging.getLogger(loggingName)
def setupOutputs(self):
self.Output.meta.assignFrom(self.Image.meta)
self.Output.meta.dtype = self.ObjectMap.meta.mapping_dtype
def execute(self, slot, subindex, roi, result):
tStart = time.time()
tIMG = time.time()
img = self.Image(roi.start, roi.stop).wait()
tIMG = 1000.0*(time.time()-tIMG)
for t in range(roi.start[0], roi.stop[0]):
tMAP = time.time()
map_ = self.ObjectMap([t]).wait()
tmap = map_[t]
# FIXME: necessary because predictions are returned
# enclosed in a list.
if isinstance(tmap, list):
tmap = tmap[0]
tmap = tmap.squeeze()
if tmap.ndim==0:
# no objects, nothing to paint
result[t-roi.start[0]][:] = 0
return result
tMAP = 1000.0*(time.time()-tMAP)
#FIXME: This should be cached (and reset when the input becomes dirty)")
tMAX = time.time()
idx = img.max()
if len(tmap) <= idx:
newTmap = numpy.zeros((idx + 1,)) # And maybe this should be cached, too?
newTmap[:len(tmap)] = tmap[:]
tmap = newTmap
tMAX = 1000.0*(time.time()-tMAX)
#do the work thing
tWORK = time.time()
result[t-roi.start[0]] = tmap[img[t-roi.start[0]]]
tWORK = 1000.0*(time.time()-tWORK)
if self.logger.getEffectiveLevel() >= logging.DEBUG:
tStart = 1000.0*(time.time()-tStart)
self.logger.debug("took %f msec. (img: %f, wait ObjectMap: %f, do work: %f, max: %f)" % (tStart, tIMG, tMAP, tWORK, tMAX))
return result
def propagateDirty(self, slot, subindex, roi):
if slot is self.Image:
self.Output.setDirty(roi)
elif slot is self.ObjectMap or slot is self.Features:
# this is hacky. the gui's onClick() function calls
# setDirty with a (time, object) pair, while elsewhere we
# call setDirty with ().
if len(roi._l) == 0:
self.Output.setDirty(slice(None))
elif isinstance(roi._l[0], int):
for t in roi._l:
self.Output.setDirty(slice(t))
else:
assert len(roi._l[0]) == 2
# for each dirty object, only set its bounding box dirty
ts = list(set(t for t, _ in roi._l))
feats = self.Features(ts).wait()
for t, obj in roi._l:
min_coords = feats[t][default_features_key]['Coord<Minimum>'][obj].astype(numpy.uint32)
max_coords = feats[t][default_features_key]['Coord<Maximum>'][obj].astype(numpy.uint32)
slcs = list(slice(*args) for args in zip(min_coords, max_coords))
slcs = [slice(t, t+1),] + slcs + [slice(None),]
self.Output.setDirty(slcs)
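# The relabeling above boils down to a numpy lookup-table application.
# Hedged standalone sketch (assumed toy data, illustration only):
#
#   img = numpy.array([[0, 1], [2, 1]])   # segmentation: object ids
#   tmap = numpy.array([0, 5, 7])         # object id -> predicted label
#   tmap[img]                             # -> [[0, 5], [7, 5]]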
class OpMultiRelabelSegmentation(Operator):
"""Takes a segmentation image and multiple mappings and returns the
mapped images.
For instance, map prediction probabilities for different classes
onto objects.
"""
name = "OpToImageMulti"
Image = InputSlot()
ObjectMaps = InputSlot(stype=Opaque, rtype=List, level=1)
Features = InputSlot(rtype=List, stype=Opaque) #this is needed to limit dirty propagation to the object bbox
Output = OutputSlot(level=1)
def __init__(self, *args, **kwargs):
super(OpMultiRelabelSegmentation, self).__init__(*args, **kwargs)
self._innerOperators = []
def setupOutputs(self):
nmaps = len(self.ObjectMaps)
for islot in self.ObjectMaps:
op = OpRelabelSegmentation(parent=self)
op.Image.connect(self.Image)
op.ObjectMap.connect(islot)
op.Features.connect(self.Features)
self._innerOperators.append(op)
self.Output.resize(nmaps)
for i, oslot in enumerate(self.Output):
oslot.connect(self._innerOperators[i].Output)
def propagateDirty(self, slot, subindex, roi):
pass
class OpMaxLabel(Operator):
"""Finds the maximum label value in the input labels.
Special operator for object classification labels, expects
inputs to be in a dictionary
"""
name = "OpMaxLabel"
Inputs = InputSlot(level=1,rtype=List, stype=Opaque)
Output = OutputSlot()
def __init__(self, *args, **kwargs):
super(OpMaxLabel, self).__init__(*args, **kwargs)
self.Output.meta.shape = (1,)
self.Output.meta.dtype = object
self._output = 0 #internal cache
def setupOutputs(self):
self.updateOutput()
self.Output.setValue(self._output)
def execute(self, slot, subindex, roi, result):
result[0] = self._output
return result
def propagateDirty(self, inputSlot, subindex, roi):
self.updateOutput()
self.Output.setValue(self._output)
def updateOutput(self):
# Return the max value of all our inputs
maxValue = None
for i, inputSubSlot in enumerate(self.Inputs):
subSlotLabelDict = self.Inputs[i][:].wait()
for v in subSlotLabelDict.itervalues():
subSlotMax = numpy.max(v)
if maxValue is None:
maxValue = subSlotMax
else:
maxValue = max(maxValue, subSlotMax)
self._output = int(maxValue)
class OpBadObjectsToWarningMessage(Operator):
"""Parses an input dictionary of bad objects and bad features, and
sets an informative warning message to its output slot.
"""
name = "OpBadObjectsToWarningMessage"
_blockSep = "\n\n"
_itemSep = "\n"
_objectSep = ", "
_itemIndent = " "
# the input slot
# format: BadObjects.value = {
# 'objects':
# {img_key: {time_key: [obj_index, obj_index_2, ...]}},
# 'feats':
# set()
# }
BadObjects = InputSlot(stype=Opaque)
# the output slot
# format: WarningMessage.value =
# {'title': a, 'text': b, 'info': c, 'details': d} if message available, the keys 'info' and 'details' might be omitted
# {} otherwise
WarningMessage = OutputSlot(stype=Opaque)
def setupOutputs(self):
pass
def propagateDirty(self, slot, subindex, roi):
try:
d = self.BadObjects[:].wait()
except AssertionError as E:
if "has no value" in str(E):
# since we are in propagateDirty, the input got reset or we got disconnected, either case
# means we don't have to issue warnings any more
return
# don't know what this is about, raise again
raise
warn = {}
warn['title'] = 'Warning'
warn['text'] = 'Encountered bad objects/features while training.'
warn['details'] = self._formatMessage(d)
if len(warn['details']) == 0:
return
self.WarningMessage.setValue(warn)
def execute(self, slot, subindex, roi, result):
pass
def _formatMessage(self, d):
a = []
try:
keys = d.keys()
# a) objects
if 'objects' in keys:
keys.remove('objects')
s = self._formatObjects(d['objects'])
if len(s) > 0:
a.append(s)
# b) features
if 'feats' in keys:
keys.remove('feats')
s = self._formatFeatures(d['feats'])
if len(s)>0:
a.append(s)
if len(keys)>0:
logger.warning("Encountered unknown bad object keywords: {}".format(keys))
except AttributeError:
raise TypeError("Expected input to be a dictionary, got {}".format(type(d)))
return self._blockSep.join(a)
def _formatFeatures(self, f):
try:
a = self._itemSep.join(map(str, sorted(f)))
except TypeError:
raise TypeError("Expected bad features to be a set, got {}".format(type(f)))
if len(a)>0:
a = "The following features had bad values:" + self._itemSep + a
return a
def _formatObjects(self, obj):
a = []
indent = 1
try:
# loop image indices
for img in obj.keys():
imtext = self._itemIndent*indent + "at image index {}".format(img)
indent += 1
# just show time slice if more than 1 time slice exists (avoid confusion/obfuscation)
needTime = len(obj[img].keys())>1
b = []
# loop time values
for t in obj[img].keys():
# object numbers
c = self._objectSep.join(map(str,obj[img][t]))
if len(c)>0:
c = self._itemIndent*indent + "Objects " + c
if needTime:
c = self._itemIndent*indent + "at time {}".format(t) + self._itemSep + self._itemIndent + c
b.append(c)
indent -= 1
if len(b)>0:
a.append(self._itemSep.join([imtext] + b))
except AttributeError:
raise TypeError("bad objects dictionary has wrong format.")
if len(a)>0:
return self._itemSep.join(["The following objects had bad features:"] +a)
else:
return ""
|
gpl-3.0
|
direvus/ansible
|
lib/ansible/modules/cloud/amazon/ecs_cluster.py
|
25
|
7340
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ecs_cluster
short_description: create or terminate ECS clusters
notes:
- When deleting a cluster, the information returned is the state of the cluster prior to deletion.
- It will also wait for a cluster to have instances registered to it.
description:
    - Creates or terminates ECS clusters.
version_added: "2.0"
author: Mark Chance (@Java1Guy)
requirements: [ boto3 ]
options:
state:
description:
- The desired state of the cluster
required: true
choices: ['present', 'absent', 'has_instances']
name:
description:
- The cluster name
required: true
delay:
description:
            - Number of seconds to wait between attempts
required: false
repeat:
description:
- The number of times to wait for the cluster to have an instance
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Cluster creation
- ecs_cluster:
name: default
state: present
# Cluster deletion
- ecs_cluster:
name: default
state: absent
- name: Wait for register
ecs_cluster:
name: "{{ new_cluster }}"
state: has_instances
delay: 10
repeat: 10
register: task_output
'''
RETURN = '''
activeServicesCount:
description: how many services are active in this cluster
returned: 0 if a new cluster
type: int
clusterArn:
description: the ARN of the cluster just created
type: string
returned: 0 if a new cluster
sample: arn:aws:ecs:us-west-2:172139249013:cluster/test-cluster-mfshcdok
clusterName:
description: name of the cluster just created (should match the input argument)
type: string
returned: always
sample: test-cluster-mfshcdok
pendingTasksCount:
description: how many tasks are waiting to run in this cluster
returned: 0 if a new cluster
type: int
registeredContainerInstancesCount:
description: how many container instances are available in this cluster
returned: 0 if a new cluster
type: int
runningTasksCount:
description: how many tasks are running in this cluster
returned: 0 if a new cluster
type: int
status:
description: the status of the new cluster
returned: always
type: string
sample: ACTIVE
'''
import time
try:
import boto3
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
class EcsClusterManager:
"""Handles ECS Clusters"""
def __init__(self, module):
self.module = module
# self.ecs = boto3.client('ecs')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
self.ecs = boto3_conn(module, conn_type='client', resource='ecs',
region=region, endpoint=ec2_url, **aws_connect_kwargs)
def find_in_array(self, array_of_clusters, cluster_name, field_name='clusterArn'):
for c in array_of_clusters:
if c[field_name].endswith(cluster_name):
return c
return None
def describe_cluster(self, cluster_name):
response = self.ecs.describe_clusters(clusters=[
cluster_name
])
if len(response['failures']) > 0:
c = self.find_in_array(response['failures'], cluster_name, 'arn')
if c and c['reason'] == 'MISSING':
return None
# fall thru and look through found ones
if len(response['clusters']) > 0:
c = self.find_in_array(response['clusters'], cluster_name)
if c:
return c
raise Exception("Unknown problem describing cluster %s." % cluster_name)
def create_cluster(self, clusterName='default'):
response = self.ecs.create_cluster(clusterName=clusterName)
return response['cluster']
def delete_cluster(self, clusterName):
return self.ecs.delete_cluster(cluster=clusterName)
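# A minimal standalone sketch (not part of the module) of the suffix
# matching find_in_array performs on describe_clusters output; the ARN
# below is made up:
#
#     clusters = [{'clusterArn': 'arn:aws:ecs:us-east-1:123456789012:cluster/default'}]
#     # find_in_array(clusters, 'default') returns the first dict,
#     # because its clusterArn ends with the cluster name.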
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent', 'has_instances']),
name=dict(required=True, type='str'),
delay=dict(required=False, type='int', default=10),
repeat=dict(required=False, type='int', default=10)
))
required_together = [['state', 'name']]
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together)
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required.')
cluster_mgr = EcsClusterManager(module)
try:
existing = cluster_mgr.describe_cluster(module.params['name'])
except Exception as e:
module.fail_json(msg="Exception describing cluster '" + module.params['name'] + "': " + str(e))
results = dict(changed=False)
if module.params['state'] == 'present':
if existing and 'status' in existing and existing['status'] == "ACTIVE":
results['cluster'] = existing
else:
if not module.check_mode:
# doesn't exist. create it.
results['cluster'] = cluster_mgr.create_cluster(module.params['name'])
results['changed'] = True
# delete the cluster
elif module.params['state'] == 'absent':
if not existing:
pass
else:
# it exists, so we should delete it and mark changed.
# return info about the cluster deleted
results['cluster'] = existing
if 'status' in existing and existing['status'] == "INACTIVE":
results['changed'] = False
else:
if not module.check_mode:
cluster_mgr.delete_cluster(module.params['name'])
results['changed'] = True
elif module.params['state'] == 'has_instances':
if not existing:
module.fail_json(msg="Cluster '" + module.params['name'] + " not found.")
return
        # it exists; poll until instances register with it
delay = module.params['delay']
repeat = module.params['repeat']
time.sleep(delay)
count = 0
for i in range(repeat):
existing = cluster_mgr.describe_cluster(module.params['name'])
count = existing['registeredContainerInstancesCount']
if count > 0:
results['changed'] = True
break
time.sleep(delay)
        if count == 0 and i == repeat - 1:
module.fail_json(msg="Cluster instance count still zero after " + str(repeat) + " tries of " + str(delay) + " seconds each.")
return
module.exit_json(**results)
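# A minimal standalone sketch (illustrative only) of the has_instances
# polling loop in main(), with an injected describe function instead of a
# live ECS call:
def _wait_for_instances_sketch(describe, delay, repeat):
    time.sleep(delay)
    for _ in range(repeat):
        if describe()['registeredContainerInstancesCount'] > 0:
            return True
        time.sleep(delay)
    return False
# _wait_for_instances_sketch(lambda: {'registeredContainerInstancesCount': 2}, 0, 3) -> True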
if __name__ == '__main__':
main()
|
gpl-3.0
|
dbjohnson/spanner
|
test/test_countdown.py
|
1
|
1191
|
import unittest
import sys
from cStringIO import StringIO
from spanner import countdown
include = True
class CountdownTestCase(unittest.TestCase):
def setUp(self):
# redirect stdout
self.old_stdout = sys.stdout
self.trapped_stdout = sys.stdout = StringIO()
def tearDown(self):
# reset stdout
sys.stdout = self.old_stdout
def test_normal_function(self):
iterable = range(100)
timer = countdown.timer_for_iterable(iterable)
self.assertTrue(timer.total_iterations == len(iterable))
for i in iterable:
timer.tick()
output = self.trapped_stdout.getvalue()
self.assertTrue('%d loops completed' % len(iterable) in output)
self.assertTrue(timer.iterations_elapsed == len(iterable))
def test_overrun(self):
iterable = range(100)
timer = countdown.timer_for_iterable(iterable)
self.assertTrue(timer.total_iterations == len(iterable))
for i in iterable:
timer.tick()
timer.tick()
output = self.trapped_stdout.getvalue()
sys.stdout = self.old_stdout
self.assertTrue('Timer overrun!' in output)
|
mit
|
nsmoooose/csp
|
csp/data/ui/scripts/windows/quitresume.py
|
1
|
2828
|
#!/usr/bin/python
# Combat Simulator Project
# Copyright (C) 2002-2005 The Combat Simulator Project
# http://csp.sourceforge.net
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
Combat Simulator Project : Quit and resume window script
"""
import csp.cspsim
from csp.data.ui.scripts.utils import SlotManager
from csp.data.ui.scripts.windows.options import Options
class QuitResume(csp.cspsim.Window, SlotManager):
def __init__(self, cspsim):
csp.cspsim.Window.__init__(self)
SlotManager.__init__(self)
# Install the move window event handler.
self.moveEventHandler = csp.cspsim.ControlMoveEventHandler(self)
self.cspsim = cspsim
# Load the user interface for this window.
serializer = csp.cspsim.Serialization()
serializer.load(self, 'quit_resume.xml')
resumeButton = self.getById('resume')
        if resumeButton is not None:
self.connectToClickSignal(resumeButton, self.resume_Click)
endSimButton = self.getById('end_simulation')
        if endSimButton is not None:
self.connectToClickSignal(endSimButton, self.end_simulation_Click)
        if not self.cspsim.isPaused():
self.cspsim.togglePause()
def resume_Click(self):
if self.cspsim.isPaused():
self.cspsim.togglePause()
topMenu = self.getWindowManager().getById('topMenuWindow')
        if topMenu is not None:
topMenu.close()
self.close()
def end_simulation_Click(self):
# Start by changing the screen. This makes it possible for us to
# display the main menu and the desktop.
self.getWindowManager().closeAll()
self.cspsim.displayMenuScreen()
# End the simulation by unloading everything.
self.cspsim.unloadSimulation()
# Use the UserInterfaceStartup class to return to the main menu
# and the desktop.
from csp.data.ui.scripts.startup import UserInterfaceStartup
        startup = UserInterfaceStartup(self.cspsim) # TODO: the UserInterfaceStartup object created in sim.py still exists!
startup.run()
|
gpl-2.0
|
ferchaure/bci
|
Parsers/beta_fpga.py
|
1
|
7945
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 30 17:28:58 2015
@author: fernando chaure
"""
from configuration import GENERAL_CONFIG as CONFIG
from multiprocess_config import *
import numpy as np
from configuration import CONFIG_PARSER
from os import path
COMM = {}
for x in CONFIG_PARSER['FORMAT_CONFIG']:
if CONFIG_PARSER['FORMAT_CONFIG'][x].isdigit():
COMM[x] = int(CONFIG_PARSER['FORMAT_CONFIG'][x])
else:
COMM[x] = CONFIG_PARSER['FORMAT_CONFIG'][x]
if not CONFIG['ONLINE_MODE']:
    if "DATA_INFO" in CONFIG_PARSER:
INFO = {}
for x in CONFIG_PARSER['DATA_INFO']:
if CONFIG_PARSER['DATA_INFO'][x].isdigit():
INFO[x] = int(CONFIG_PARSER['DATA_INFO'][x])
else:
INFO[x] = CONFIG_PARSER['DATA_INFO'][x]
from configuration import FILE_CONFIG
if path.exists(path.join(CONFIG_PARSER['FOLDER'],CONFIG_PARSER['FORMAT_CONFIG']['ch_order'])): #check if the string is a valid file name
CH_ORD = np.loadtxt(path.join(CONFIG_PARSER['FOLDER'],CONFIG_PARSER['FORMAT_CONFIG']['ch_order']),np.int)-1
else: #else should be a list of ints
CH_ORD = np.fromstring(CONFIG_PARSER['FORMAT_CONFIG']['ch_order'],np.int,sep=' ')-1
class Parser():
"""Une secciones de datos raw y crea la matriz que se pasara al siguiente proceso"""
def __init__(self,send_warnings,logging,data_in):
        # define what the start of a frame looks like, based on the number of amplifiers
        dicc_aux = {1:'\x23''\xFF', 2:'\x46''\xFF'} # helper
self.FFplus = np.fromstring(dicc_aux[COMM['ampcount']], np.int16)
self.logging = logging
        self.counter = -1 # the frame counter
        self.frames_parsed = 0 # position within the block being built
        self.data = data_in()
        self.c_t = 0 # position within the block being read
self.sinc = 0
self.first_read = True
self.send_warnings = send_warnings
        self.data_raw = np.ndarray(COMM['l_frame'] * CONFIG['PAQ_USB'] * 2, np.int16) # twice as large as what will normally be used
if not CONFIG['ONLINE_MODE']:
self.open_file = None
self.file_reading = FILE_CONFIG['FORMAT_FILE']
self.unread_frames = 0
self.n_file = 0
def get_raw(self,extra_data):
new_pack_data = (CONFIG['PAQ_USB'] + extra_data) * COMM['l_frame']
return self.data_raw[:new_pack_data]
def online_update(self, data):
"""Recibe datos, los parsea. Si llena una trama fija retorna 0,
si faltan mas datos retorna cuantos, si sobran retorna -1"""
max_c_t = data.size / COMM['l_frame']
        # if we are just starting on this data, synchronize first
        if self.c_t == 0:
self.sinc = 0
while self.sinc < COMM['l_frame']:
if (data[self.sinc] == self.FFplus) and not( reduce(lambda x,y: x^y, data[self.sinc:self.sinc+ COMM['l_frame']])):
                    # parse:
self.data.channels[:, self.frames_parsed] = (data[COMM['channels_pos'] + self.sinc:self.sinc+COMM['channels_pos'] + CONFIG['#CHANNELS']]-2**15)[CH_ORD]
self.frames_parsed += 1
counter_old = self.counter
self.counter = (data[COMM['counter_pos'] + self.sinc])
if np.int16(counter_old+1) != self.counter:
self.logging.error(Errors_Messages[COUNTER_ERROR])
try:
self.send_warnings.put_nowait([COUNTER_ERROR,1])
except Queue_Full:
self.logging.error(Errors_Messages[SLOW_GRAPHICS_SIGNAL])
                    # end of the first parse
break
self.sinc += 1
            if self.sinc > 0:
                max_c_t = max_c_t - 1
            if self.sinc == COMM['l_frame']:
self.logging.error(Errors_Messages[CANT_SYNCHRONIZE])
try:
self.send_warnings.put_nowait([CANT_SYNCHRONIZE,CONFIG['PAQ_USB']])
except Queue_Full:
self.logging.error(Errors_Messages[SLOW_GRAPHICS_SIGNAL])
                self.sinc = 0
                self.c_t = 0 # the whole packet is considered analyzed and corrupt; next time we start from scratch
return CONFIG['PAQ_USB'] - self.frames_parsed
else:
                self.c_t += 1
        # loop until the parsed frames are full or the whole raw data block has been parsed
while self.frames_parsed < CONFIG['PAQ_USB'] and self.c_t < max_c_t:
init_trama = self.c_t*COMM['l_frame']+self.sinc
            if data[init_trama] != self.FFplus: # out of sync
                #sinc = synchronize(c_t*40+sinc) # this would change c_t and sinc
                #print "desynchronization detected"
self.logging.error(Errors_Messages[DATA_NONSYNCHRONIZED])
try:
self.send_warnings.put_nowait([DATA_NONSYNCHRONIZED,CONFIG['PAQ_USB']-self.c_t])
except Queue_Full:
self.logging.error(Errors_Messages[SLOW_GRAPHICS_SIGNAL])
                self.c_t = max_c_t # the whole packet is considered analyzed and corrupt; next time we start from scratch
break
#parsea:
self.data.channels[:, self.frames_parsed] = (data[init_trama + COMM['channels_pos']:init_trama + COMM['channels_pos'] + CONFIG['#CHANNELS']]-2**15)[CH_ORD]
            # note: the rest of the information carried in the frame should also be parsed and stored here
self.frames_parsed += 1
counter_old = self.counter
self.counter = data[init_trama+COMM['counter_pos']]
            # compare counters and warn on mismatch
if np.int16(counter_old + 1) != self.counter:
                # record the discontinuity!!!
                #print "data loss according to counter"
self.logging.error(Errors_Messages[COUNTER_ERROR])
try:
self.send_warnings.put_nowait([COUNTER_ERROR,1])
except Queue_Full:
self.logging.error(Errors_Messages[SLOW_GRAPHICS_SIGNAL])
            # end of parsing
self.c_t += 1
if self.frames_parsed == CONFIG['PAQ_USB']:
self.frames_parsed = 0
            # return how much of the raw block is left to parse
if self.c_t == max_c_t:
self.c_t = 0
return 0
else:
                # data left over, update can be called again
return -1
else:
            # return how many frames are missing to complete the channel block
self.c_t = 0
return CONFIG['PAQ_USB'] - self.frames_parsed
def offline_update(self):
if (self.unread_frames == 0) and (self.n_file < INFO['files']):
self.n_file = self.n_file+1
self.open_file = open(FILE_CONFIG['FORMAT_FILE'][:-1]+str(self.n_file))
if self.n_file == INFO['files']:
self.unread_frames = INFO['samples4lastfile']
else:
self.unread_frames = INFO['samples4file']
elif (self.unread_frames == 0) and (self.n_file == INFO['files']):
return 1
if self.unread_frames >= CONFIG['PAQ_USB']:
self.new_data = np.fromfile(self.open_file,np.int16,CONFIG['PAQ_USB']*COMM['l_frame'])
self.unread_frames = self.unread_frames - CONFIG['PAQ_USB']
else:
self.new_data = np.fromfile(self.open_file,np.int16,self.unread_frames*COMM['l_frame'])
self.unread_frames = 0
if self.unread_frames == 0:
self.open_file.close()
return self.online_update(self.new_data)
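# A minimal standalone sketch (not part of the parser) of the frame
# synchronization test used in online_update: a candidate offset is in sync
# when the word there matches the expected header AND the XOR of the whole
# frame is zero. The values below are synthetic:
_hdr = np.int16(0x23FF) # stand-in for self.FFplus
_body = np.array([5, 7], np.int16)
_frame = np.concatenate(([_hdr], _body, [_hdr ^ _body[0] ^ _body[1]]))
_in_sync = (_frame[0] == _hdr) and not reduce(lambda x, y: x ^ y, _frame)
# _in_sync -> True: the header matches and the frame XORs to zero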
|
mit
|
pynag/pynag
|
tests/test_utils.py
|
1
|
18547
|
from __future__ import absolute_import
import os
import sys
# Make sure we import from working tree
pynagbase = os.path.dirname(os.path.realpath(__file__ + "/.."))
sys.path.insert(0, pynagbase)
import unittest2 as unittest
from mock import patch
import shutil
import tempfile
import pynag.Utils as utils
import pynag.Model
from pynag.Utils import PynagError
from tests import tests_dir
import pynag.Utils.misc
class testUtils(unittest.TestCase):
def setUp(self):
# Utils should work fine with just about any data, but lets use
# testdata01
os.chdir(tests_dir)
os.chdir('dataset01')
pynag.Model.config = None
pynag.Model.cfg_file = './nagios/nagios.cfg'
pynag.Model.ObjectDefinition.objects.get_all()
self.tmp_dir = tempfile.mkdtemp() # Will be deleted after test runs
os.environ['LANG'] = 'en_US@UTF8'
def tearDown(self):
shutil.rmtree(self.tmp_dir, ignore_errors=True)
def testCompareFilterWithGrep(self):
""" test pynag.Utils.grep() by comparing it with pynag.Model.Service.objects.filter()
# TODO: Currently pynag.Model.Service.objects.filter() has some bugs, so some tests here fail.
"""
self._compare_search_expressions(use='generic-service')
self._compare_search_expressions(register=1, use='generic-service')
self._compare_search_expressions(host_name__exists=True)
self._compare_search_expressions(host_name__exists=False)
self._compare_search_expressions(host_name__notcontains='l')
self._compare_search_expressions(host_name__notcontains='this value cannot possibly exist')
self._compare_search_expressions(host_name__startswith='l')
self._compare_search_expressions(host_name__endswith='m')
self._compare_search_expressions(host_name__isnot='examplehost for testing purposes')
def testGrep(self):
""" Test cases based on gradecke's testing """
host = pynag.Model.string_to_class['host']()
host['use'] = "generic-host"
host['name'] = "ABC"
host['_code'] = "ABC"
host['_function'] = "Server,Production"
host2 = pynag.Model.string_to_class['host']()
host2['use'] = "generic-host"
host2['name'] = "XYZ"
host2['_code'] = "XYZ"
host2['_function'] = "Switch,Production"
hosts = host, host2
result = pynag.Utils.grep(hosts, **{'_code__contains': 'ABC'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(hosts, **{'_code__contains': 'BC'})
self.assertEqual(1, len(result))
# Check that contains does not match nonexisting values
result = pynag.Utils.grep(hosts, **{'_code__contains': ''})
self.assertEqual(2, len(result))
result = pynag.Utils.grep(hosts, **{'nonexistant__contains': ''})
self.assertEqual(0, len(result))
result = pynag.Utils.grep(hosts, **{'_code__notcontains': 'ABC'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(hosts, **{'_code__notcontains': 'BC'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(hosts, **{'_code__startswith': 'ABC'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(hosts, **{'_code__startswith': 'AB'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(hosts, **{'_code__notstartswith': 'AB'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(hosts, **{'_code__endswith': 'ABC'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(hosts, **{'_code__endswith': 'BC'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(hosts, **{'_code__notendswith': 'YZ'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(hosts, **{'_code__exists': True})
self.assertEqual(2, len(result))
result = pynag.Utils.grep(hosts, **{'_code__exists': False})
self.assertEqual(0, len(result))
result = pynag.Utils.grep(hosts, **{'_function__has_field': 'Server'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(
hosts, **{'_function__has_field': 'Production'})
self.assertEqual(2, len(result))
result = pynag.Utils.grep(hosts, **{'name__notcontains': 'A'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(hosts, **{'name__regex': 'A.C'})
self.assertEqual(1, len(result))
self.assertEqual('ABC', result[0].name)
result = pynag.Utils.grep(hosts, **{'name__in': ['ABC', 'BCD']})
self.assertEqual(1, len(result))
self.assertEqual('ABC', result[0].name)
result = pynag.Utils.grep(hosts, **{'name__notin': ['ABC', 'BCD']})
self.assertEqual(1, len(result))
self.assertEqual('XYZ', result[0].name)
result = pynag.Utils.grep(hosts, **{'search': 'Switch'})
self.assertEqual(1, len(result))
self.assertEqual('XYZ', result[0].name)
def _compare_search_expressions(self, **expression):
# print "Testing search expression %s" % expression
all_services = pynag.Model.Service.objects.all
result1 = pynag.Model.Service.objects.filter(**expression)
result2 = pynag.Utils.grep(all_services, **expression)
self.assertEqual(
result1, result2, msg="Search output from pynag.Utils.grep() does not match pynag.Model.Service.objects.filter() when using parameters %s\nFilter: %s\nGrep: %s" %
(expression, result1, result2))
return len(result1)
def test_run_command_file_not_found(self):
command = '/bin/doesnotexist'
expected_msg = '\* Could not run command \(return code= %s\)\n' % 127
expected_msg += '\* Error was:\n.*: %s: (not found|No such file or directory)\n' % command
expected_msg += '\* Command was:\n%s\n' % command
expected_msg += '\* Output was:\n\n'
expected_msg += 'Check if y/our path is correct: %s' % os.getenv(
'PATH')
self.assertRaisesRegexp(
utils.PynagError, expected_msg, utils.runCommand, command, raise_error_on_fail=True)
def test_gitrepo_init_empty(self):
from getpass import getuser
from platform import node
emptyish = [None, '', ' ', '\n ']
for x in emptyish:
repo = utils.GitRepo(
directory=self.tmp_dir,
auto_init=True,
author_name=x,
author_email=x
)
self.assertEquals(repo.author_name, 'Pynag User')
expected_email = '%s@%s' % (getuser(), node())
self.assertEquals(repo.author_email, expected_email)
def test_gitrepo_init_with_author(self):
tempfile.mkstemp(dir=self.tmp_dir)
author_name = 'Git Owner'
author_email = '[email protected]'
repo = utils.GitRepo(
directory=self.tmp_dir,
auto_init=True,
author_name=author_name,
author_email=author_email
)
self.assertEquals(repo.author_name, author_name)
self.assertEquals(repo.author_email, author_email)
self.assertEquals(len(repo.log()), 1)
self.assertEquals(repo.log()[0]['author_name'], author_name)
self.assertEquals(repo.log()[0]['author_email'], author_email)
def test_gitrepo_init_with_files(self):
tempfile.mkstemp(dir=self.tmp_dir)
# If pynag defaults will fail, correctly, adjust for test
author_email = None
from getpass import getuser
from platform import node
nodename = node()
if nodename.endswith('.(none)'):
            nodename = nodename[:-7] + '.example.com'
author_email = '%s@%s' % (getuser(), nodename)
repo = utils.GitRepo(
directory=self.tmp_dir,
auto_init=True,
author_name=None,
author_email=author_email
)
# Check that there is an initial commit
expected_email = '%s@%s' % (getuser(), nodename)
self.assertEquals(len(repo.log()), 1)
self.assertEquals(repo.log()[0]['comment'], 'Initial Commit')
self.assertEquals(repo.log()[0]['author_name'], 'Pynag User')
self.assertEquals(repo.log()[0]['author_email'], expected_email)
# Test kwargs functionality
self.assertEquals(
repo.log(author_email=expected_email)[0]['author_email'], expected_email)
self.assertEquals(
repo.log(comment__contains='Initial')[0]['comment'], 'Initial Commit')
self.assertEquals(len(repo.log(comment__contains='nothing')), 0)
# Test show method
initial_hash = repo.log()[0]['hash']
initial_hash_valid_commits = repo.get_valid_commits()[0]
self.assertEquals(initial_hash, initial_hash_valid_commits)
gitrunpatcher = patch('pynag.Utils.GitRepo._run_command')
validcommitspatcher = patch('pynag.Utils.GitRepo.get_valid_commits')
gitrunpatch = gitrunpatcher.start()
validcommitspatch = validcommitspatcher.start()
validcommitspatch.return_value = [initial_hash]
repo.show(initial_hash)
gitrunpatch.assert_called_once_with('git show %s' % initial_hash)
gitrunpatcher.stop()
validcommitspatcher.stop()
        self.assertRaisesRegexp(
            PynagError, 'is not a valid commit id', repo.show, 'invalid commit id')
# Add file
tempfile.mkstemp(dir=self.tmp_dir)
self.assertEquals(len(repo.get_uncommited_files()), 1)
self.assertEquals(repo.is_up_to_date(), False)
# Commit file
repo.commit(filelist=repo.get_uncommited_files()[0]['filename'])
self.assertEquals(repo.is_up_to_date(), True)
self.assertEquals(len(repo.get_uncommited_files()), 0)
self.assertEquals(len(repo.get_valid_commits()), 2)
log_entry = repo.log()[0]
self.assertEquals(log_entry['comment'], 'commited by pynag')
def test_gitrepo_deprecated_methods(self):
"""
Delete this class as deprecated methods are removed.
"""
repo = utils.GitRepo(directory=self.tmp_dir, auto_init=True)
testfilename = 'testfile.name.txt'
add_method_patcher = patch('pynag.Utils.GitRepo.add')
add_method_patch = add_method_patcher.start()
repo._git_add(testfilename)
add_method_patch.assert_called_once_with(testfilename)
add_method_patcher.stop()
commit_method_mocker = patch('pynag.Utils.GitRepo.commit')
commit_method_mock = commit_method_mocker.start()
repo._git_commit(filename=testfilename, message='test')
commit_method_mock.assert_called_once_with(
message='test', filelist=[testfilename])
commit_method_mock.reset_mock()
repo._git_commit(
filename=None, message='test', filelist=[testfilename])
commit_method_mock.assert_called_once_with(
message='test', filelist=[testfilename])
commit_method_mock.reset_mock()
repo._git_commit(
filename=testfilename, message='test', filelist=[testfilename])
commit_method_mock.assert_called_once_with(
message='test', filelist=[testfilename, testfilename])
commit_method_mocker.stop()
def test_gitrepo_diff(self):
""" Test git diff works as expected """
# Create repo and write one test commit
git = utils.GitRepo(directory=self.tmp_dir, auto_init=True)
tmp_filename = "%s/%s" % (self.tmp_dir, 'testfile.txt')
open(tmp_filename, 'w').write('test data\n')
git.commit()
# First try diff with no changes made:
diff = git.diff()
self.assertEquals(diff, '')
# Now append to our file and see the difference:
extra_data = 'extra data\n'
open(tmp_filename, 'a').write(extra_data)
# Call diff with no params, check if extra_data is in the diff
diff = git.diff()
self.assertTrue(diff.find(extra_data) > 0)
# Call diff with filename as parameter, check if extra_data is in the
# diff
diff = git.diff(commit_id_or_filename=tmp_filename)
self.assertTrue(diff.find(extra_data) > 0)
# Call commit again and confirm there is no diff
git.commit()
diff = git.diff()
self.assertEquals(diff, '')
# Call a diff against first commit, see if we find our changes in the
# commit.
all_commits = git.get_valid_commits()
first_commit = all_commits.pop()
diff = git.diff(commit_id_or_filename=first_commit)
self.assertTrue(diff.find(extra_data) > 0)
# Revert latest change, and make sure diff is gone.
last_commit = all_commits.pop(0)
git.revert(last_commit)
diff = git.diff(commit_id_or_filename=first_commit)
self.assertTrue(diff.find(extra_data) == -1)
# At last try to diff against an invalid commit id
try:
git.diff('invalid commit id')
self.assertTrue(
False, "we wanted exception when calling diff on invalid commit id")
except PynagError:
pass
def test_send_nsca(self):
""" test pynag.Utils.send_nsca
By its very nature, send_nsca binary itself does not allow for much testing,
however we can still test if the function is working as expected
"""
# Run send_nsca normally for a smoke test (we don't know much about what send_nsca will do with out packet)
# This test will only fail if there are unhandled tracebacks in the
# code somewhere
try:
pynag.Utils.send_nsca(code=0, message="test", nscahost="localhost")
except OSError as e:
# We don't care about the result if we have error because send_nsca
# is not installed
if e.errno != 2:
raise e
result = pynag.Utils.send_nsca(
code=0, message="match", nscahost="match", hostname="test", service=None, nscabin="/bin/grep", nscaconf="-")
self.assertEqual(0, result[0])
self.assertEqual('(standard input):1\n', result[1])
result = pynag.Utils.send_nsca(
code=0, message="match", nscahost="nomatch", hostname="test", service=None, nscabin="/bin/grep", nscaconf="-")
self.assertEqual(1, result[0])
self.assertEqual('(standard input):0\n', result[1])
class testFakeNagiosEnvironment(unittest.TestCase):
def setUp(self):
self.environment = pynag.Utils.misc.FakeNagiosEnvironment()
self.environment.create_minimal_environment()
def tearDown(self):
self.environment.terminate()
def testMinimal(self):
""" Minimal Test of our FakeNagiosEnvironment """
nagios = pynag.Utils.misc.FakeNagiosEnvironment()
nagios.create_minimal_environment()
nagios.config.parse()
self.assertTrue(os.path.isfile(nagios.config.cfg_file))
self.assertTrue(os.path.isdir(nagios.objects_dir))
def testModelUpdates(self):
""" Test backup and restores of Model global variables """
nagios = self.environment
original_config = pynag.Model.config
original_cfg_file = pynag.Model.cfg_file
original_dir = pynag.Model.pynag_directory
# Update model, and check if updates succeeded
nagios.update_model()
self.assertEqual(pynag.Model.config, nagios.config)
self.assertEqual(pynag.Model.cfg_file, nagios.config.cfg_file)
self.assertEqual(pynag.Model.pynag_directory, nagios.objects_dir)
# See if we can restore our model
nagios.restore_model()
self.assertEqual(pynag.Model.config, original_config)
self.assertEqual(pynag.Model.cfg_file, original_cfg_file)
self.assertEqual(pynag.Model.pynag_directory, original_dir)
def testStartStop(self):
""" Try to start and stop our nagios environment """
self.environment.start()
pid = open(os.path.join(self.environment.tempdir, "nagios.pid")).read()
pid = int(pid)
try:
os.kill(pid, 0)
except OSError:
self.assertTrue(False, "Did not find a running process with process_id=%s" % pid)
self.environment.stop()
try:
os.kill(pid, 0)
self.assertTrue(False, "Seems like process with process_id=%s is still running" % pid)
except OSError:
pass
def testOpenDecorator(self):
""" Makes sure the fake nagios environment cannot go outside its directory """
# Try to open a regular file
self.environment.config.open(self.environment.config.cfg_file).close()
self.assertTrue(True, "Successfully opened nagios.cfg")
try:
self.environment.config.open("/etc/passwd").close()
self.assertTrue(False, "I was able to open a file outside my tempdir!")
except PynagError:
pass
def testUpdateModel_NoRestore(self):
self.environment.update_model()
def testLivestatus(self):
host_name = "localhost"
self.environment.update_model()
pynag.Model.Host(host_name=host_name, use="generic-host").save()
self.environment.guess_livestatus_path()
self.environment.configure_livestatus()
self.environment.start()
livestatus = self.environment.get_livestatus()
hosts = livestatus.get_hosts(name=host_name)
self.assertTrue(hosts, "Could not find a host called %s" % (host_name))
def testImports(self):
""" Test FakeNagiosEnvironment.import_config() """
host1 = "host1"
host2 = "host2"
tempdir = tempfile.mkdtemp()
tempfile1 = tempfile.mktemp(suffix='.cfg')
tempfile2 = os.path.join(tempdir, 'file2.cfg')
with open(tempfile1, 'w') as f:
f.write("define host {\nname host1\n}")
with open(tempfile2, 'w') as f:
f.write("define host {\nname host2\n}")
self.environment.import_config(tempdir)
self.environment.import_config(tempfile1)
self.environment.update_model()
host1 = pynag.Model.Host.objects.filter(name=host1)
host2 = pynag.Model.Host.objects.filter(name=host2)
self.assertTrue(host1)
self.assertTrue(host2)
if __name__ == "__main__":
unittest.main()
# vim: sts=4 expandtab autoindent
|
gpl-2.0
|
IveWong/jasmine
|
setup.py
|
56
|
1974
|
from setuptools import setup, find_packages, os
import json
with open('package.json') as packageFile:
version = json.load(packageFile)['version']
setup(
name="jasmine-core",
version=version,
url="http://jasmine.github.io",
author="Pivotal Labs",
author_email="[email protected]",
description=('Jasmine is a Behavior Driven Development testing framework for JavaScript. It does not rely on '+
'browsers, DOM, or any JavaScript framework. Thus it\'s suited for websites, '+
'Node.js (http://nodejs.org) projects, or anywhere that JavaScript can run.'),
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing',
],
packages=['jasmine_core', 'jasmine_core.images'],
package_dir={'jasmine_core': 'lib/jasmine-core', 'jasmine_core.images': 'images'},
package_data={'jasmine_core': ['*.js', '*.css'], 'jasmine_core.images': ['*.png']},
include_package_data=True,
install_requires=['glob2>=0.4.1', 'ordereddict==1.1']
)
|
mit
|
semonte/intellij-community
|
plugins/hg4idea/testData/bin/mercurial/repo.py
|
88
|
1379
|
# repo.py - repository base classes for mercurial
#
# Copyright 2005, 2006 Matt Mackall <[email protected]>
# Copyright 2006 Vadim Gelfer <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from i18n import _
import error
class repository(object):
def capable(self, name):
'''tell whether repo supports named capability.
return False if not supported.
if boolean capability, return True.
if string capability, return string.'''
if name in self.capabilities:
return True
name_eq = name + '='
for cap in self.capabilities:
if cap.startswith(name_eq):
return cap[len(name_eq):]
return False
def requirecap(self, name, purpose):
'''raise an exception if the given capability is not present'''
if not self.capable(name):
raise error.CapabilityError(
_('cannot %s; remote repository does not '
'support the %r capability') % (purpose, name))
def local(self):
return False
def cancopy(self):
return self.local()
def rjoin(self, path):
url = self.url()
if url.endswith('/'):
return url + path
else:
return url + '/' + path
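# A minimal sketch (not from the original module) of how ``capable``
# distinguishes boolean from string capabilities, using a stub subclass:
class _stubrepo(repository):
    capabilities = ['lookup', 'changegroupsubset=1']
# _stubrepo().capable('lookup')            -> True  (boolean capability)
# _stubrepo().capable('changegroupsubset') -> '1'   (string capability)
# _stubrepo().capable('pushkey')           -> False (not supported)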
|
apache-2.0
|
angelapper/edx-platform
|
common/djangoapps/track/tests/test_shim.py
|
24
|
10533
|
"""Ensure emitted events contain the fields legacy processors expect to find."""
from collections import namedtuple
import ddt
from django.test.utils import override_settings
from mock import sentinel
from openedx.core.lib.tests.assertions.events import assert_events_equal
from . import FROZEN_TIME, EventTrackingTestCase
from .. import transformers
from ..shim import PrefixedEventProcessor
LEGACY_SHIM_PROCESSOR = [
{
'ENGINE': 'track.shim.LegacyFieldMappingProcessor'
}
]
GOOGLE_ANALYTICS_PROCESSOR = [
{
'ENGINE': 'track.shim.GoogleAnalyticsProcessor'
}
]
@override_settings(
EVENT_TRACKING_PROCESSORS=LEGACY_SHIM_PROCESSOR,
)
class LegacyFieldMappingProcessorTestCase(EventTrackingTestCase):
"""Ensure emitted events contain the fields legacy processors expect to find."""
def test_event_field_mapping(self):
data = {sentinel.key: sentinel.value}
context = {
'accept_language': sentinel.accept_language,
'referer': sentinel.referer,
'username': sentinel.username,
'session': sentinel.session,
'ip': sentinel.ip,
'host': sentinel.host,
'agent': sentinel.agent,
'path': sentinel.path,
'user_id': sentinel.user_id,
'course_id': sentinel.course_id,
'org_id': sentinel.org_id,
'client_id': sentinel.client_id,
}
with self.tracker.context('test', context):
self.tracker.emit(sentinel.name, data)
emitted_event = self.get_event()
expected_event = {
'accept_language': sentinel.accept_language,
'referer': sentinel.referer,
'event_type': sentinel.name,
'name': sentinel.name,
'context': {
'user_id': sentinel.user_id,
'course_id': sentinel.course_id,
'org_id': sentinel.org_id,
'path': sentinel.path,
},
'event': data,
'username': sentinel.username,
'event_source': 'server',
'time': FROZEN_TIME,
'agent': sentinel.agent,
'host': sentinel.host,
'ip': sentinel.ip,
'page': None,
'session': sentinel.session,
}
assert_events_equal(expected_event, emitted_event)
def test_missing_fields(self):
self.tracker.emit(sentinel.name)
emitted_event = self.get_event()
expected_event = {
'accept_language': '',
'referer': '',
'event_type': sentinel.name,
'name': sentinel.name,
'context': {},
'event': {},
'username': '',
'event_source': 'server',
'time': FROZEN_TIME,
'agent': '',
'host': '',
'ip': '',
'page': None,
'session': '',
}
assert_events_equal(expected_event, emitted_event)
@override_settings(
EVENT_TRACKING_PROCESSORS=GOOGLE_ANALYTICS_PROCESSOR,
)
class GoogleAnalyticsProcessorTestCase(EventTrackingTestCase):
"""Ensure emitted events contain the fields necessary for Google Analytics."""
def test_event_fields(self):
""" Test that course_id is added as the label if present, and nonInteraction is set. """
data = {sentinel.key: sentinel.value}
context = {
'path': sentinel.path,
'user_id': sentinel.user_id,
'course_id': sentinel.course_id,
'org_id': sentinel.org_id,
'client_id': sentinel.client_id,
}
with self.tracker.context('test', context):
self.tracker.emit(sentinel.name, data)
emitted_event = self.get_event()
expected_event = {
'context': context,
'data': data,
'label': sentinel.course_id,
'name': sentinel.name,
'nonInteraction': 1,
'timestamp': FROZEN_TIME,
}
assert_events_equal(expected_event, emitted_event)
def test_no_course_id(self):
""" Test that a label is not added if course_id is not specified, but nonInteraction is still set. """
data = {sentinel.key: sentinel.value}
context = {
'path': sentinel.path,
'user_id': sentinel.user_id,
'client_id': sentinel.client_id,
}
with self.tracker.context('test', context):
self.tracker.emit(sentinel.name, data)
emitted_event = self.get_event()
expected_event = {
'context': context,
'data': data,
'name': sentinel.name,
'nonInteraction': 1,
'timestamp': FROZEN_TIME,
}
assert_events_equal(expected_event, emitted_event)
@override_settings(
EVENT_TRACKING_BACKENDS={
'0': {
'ENGINE': 'eventtracking.backends.routing.RoutingBackend',
'OPTIONS': {
'backends': {
'first': {'ENGINE': 'track.tests.InMemoryBackend'}
},
'processors': [
{
'ENGINE': 'track.shim.GoogleAnalyticsProcessor'
}
]
}
},
'1': {
'ENGINE': 'eventtracking.backends.routing.RoutingBackend',
'OPTIONS': {
'backends': {
'second': {
'ENGINE': 'track.tests.InMemoryBackend'
}
}
}
}
}
)
class MultipleShimGoogleAnalyticsProcessorTestCase(EventTrackingTestCase):
"""Ensure changes don't impact other backends"""
def test_multiple_backends(self):
data = {
sentinel.key: sentinel.value,
}
context = {
'path': sentinel.path,
'user_id': sentinel.user_id,
'course_id': sentinel.course_id,
'org_id': sentinel.org_id,
'client_id': sentinel.client_id,
}
with self.tracker.context('test', context):
self.tracker.emit(sentinel.name, data)
segment_emitted_event = self.tracker.backends['0'].backends['first'].events[0]
log_emitted_event = self.tracker.backends['1'].backends['second'].events[0]
expected_event = {
'context': context,
'data': data,
'label': sentinel.course_id,
'name': sentinel.name,
'nonInteraction': 1,
'timestamp': FROZEN_TIME,
}
assert_events_equal(expected_event, segment_emitted_event)
expected_event = {
'context': context,
'data': data,
'name': sentinel.name,
'timestamp': FROZEN_TIME,
}
assert_events_equal(expected_event, log_emitted_event)
SequenceDDT = namedtuple('SequenceDDT', ['action', 'tab_count', 'current_tab', 'legacy_event_type'])
@ddt.ddt
class EventTransformerRegistryTestCase(EventTrackingTestCase):
"""
Test the behavior of the event registry
"""
def setUp(self):
super(EventTransformerRegistryTestCase, self).setUp()
self.registry = transformers.EventTransformerRegistry()
@ddt.data(
('edx.ui.lms.sequence.next_selected', transformers.NextSelectedEventTransformer),
('edx.ui.lms.sequence.previous_selected', transformers.PreviousSelectedEventTransformer),
('edx.ui.lms.sequence.tab_selected', transformers.SequenceTabSelectedEventTransformer),
('edx.video.foo.bar', transformers.VideoEventTransformer),
)
@ddt.unpack
def test_event_registry_dispatch(self, event_name, expected_transformer):
event = {'name': event_name}
transformer = self.registry.create_transformer(event)
self.assertIsInstance(transformer, expected_transformer)
@ddt.data(
'edx.ui.lms.sequence.next_selected.what',
'edx',
'unregistered_event',
)
def test_dispatch_to_nonexistent_events(self, event_name):
event = {'name': event_name}
with self.assertRaises(KeyError):
self.registry.create_transformer(event)
@ddt.ddt
class PrefixedEventProcessorTestCase(EventTrackingTestCase):
"""
Test PrefixedEventProcessor
"""
@ddt.data(
SequenceDDT(action=u'next', tab_count=5, current_tab=3, legacy_event_type=u'seq_next'),
SequenceDDT(action=u'next', tab_count=5, current_tab=5, legacy_event_type=None),
SequenceDDT(action=u'previous', tab_count=5, current_tab=3, legacy_event_type=u'seq_prev'),
SequenceDDT(action=u'previous', tab_count=5, current_tab=1, legacy_event_type=None),
)
def test_sequence_linear_navigation(self, sequence_ddt):
event_name = u'edx.ui.lms.sequence.{}_selected'.format(sequence_ddt.action)
event = {
u'name': event_name,
u'event': {
u'current_tab': sequence_ddt.current_tab,
u'tab_count': sequence_ddt.tab_count,
u'id': u'ABCDEFG',
}
}
process_event_shim = PrefixedEventProcessor()
result = process_event_shim(event)
# Legacy fields get added when needed
if sequence_ddt.action == u'next':
offset = 1
else:
offset = -1
if sequence_ddt.legacy_event_type:
self.assertEqual(result[u'event_type'], sequence_ddt.legacy_event_type)
self.assertEqual(result[u'event'][u'old'], sequence_ddt.current_tab)
self.assertEqual(result[u'event'][u'new'], sequence_ddt.current_tab + offset)
else:
self.assertNotIn(u'event_type', result)
self.assertNotIn(u'old', result[u'event'])
self.assertNotIn(u'new', result[u'event'])
def test_sequence_tab_navigation(self):
event_name = u'edx.ui.lms.sequence.tab_selected'
event = {
u'name': event_name,
u'event': {
u'current_tab': 2,
u'target_tab': 5,
u'tab_count': 9,
u'id': u'block-v1:abc',
u'widget_placement': u'top',
}
}
process_event_shim = PrefixedEventProcessor()
result = process_event_shim(event)
self.assertEqual(result[u'event_type'], u'seq_goto')
self.assertEqual(result[u'event'][u'old'], 2)
self.assertEqual(result[u'event'][u'new'], 5)
|
agpl-3.0
|
m-urban/beets
|
test/test_mpdstats.py
|
24
|
1572
|
# This file is part of beets.
# Copyright 2015
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from mock import Mock
from test._common import unittest
from test.helper import TestHelper
from beets.library import Item
from beetsplug.mpdstats import MPDStats
class MPDStatsTest(unittest.TestCase, TestHelper):
def setUp(self):
self.setup_beets()
self.load_plugins('mpdstats')
def tearDown(self):
self.teardown_beets()
self.unload_plugins()
def test_update_rating(self):
item = Item(title='title', path='', id=1)
item.add(self.lib)
log = Mock()
mpdstats = MPDStats(self.lib, log)
self.assertFalse(mpdstats.update_rating(item, True))
self.assertFalse(mpdstats.update_rating(None, True))
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == b'__main__':
unittest.main(defaultTest='suite')
|
mit
|
nysan/yocto-autobuilder
|
lib/python2.6/site-packages/SQLAlchemy-0.7.1-py2.6-linux-x86_64.egg/sqlalchemy/dialects/postgresql/base.py
|
8
|
53160
|
# postgresql/base.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the PostgreSQL database.
For information on connecting using specific drivers, see the documentation
section regarding that driver.
Sequences/SERIAL
----------------
PostgreSQL supports sequences, and SQLAlchemy uses these as the default means
of creating new primary key values for integer-based primary key columns. When
creating tables, SQLAlchemy will issue the ``SERIAL`` datatype for
integer-based primary key columns, which generates a sequence and server side
default corresponding to the column.
To specify a specific named sequence to be used for primary key generation,
use the :func:`~sqlalchemy.schema.Sequence` construct::
Table('sometable', metadata,
Column('id', Integer, Sequence('some_id_seq'), primary_key=True)
)
When SQLAlchemy issues a single INSERT statement, to fulfill the contract of
having the "last insert identifier" available, a RETURNING clause is added to
the INSERT statement which specifies the primary key columns should be
returned after the statement completes. The RETURNING functionality only takes
place if Postgresql 8.2 or later is in use. As a fallback approach, the
sequence, whether specified explicitly or implicitly via ``SERIAL``, is
executed independently beforehand, the returned value to be used in the
subsequent insert. Note that when an
:func:`~sqlalchemy.sql.expression.insert()` construct is executed using
"executemany" semantics, the "last inserted identifier" functionality does not
apply; no RETURNING clause is emitted nor is the sequence pre-executed in this
case.
To disable the usage of RETURNING by default, specify the flag
``implicit_returning=False`` to :func:`.create_engine`.
Transaction Isolation Level
---------------------------
:func:`.create_engine` accepts an ``isolation_level`` parameter which results
in the command ``SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL
<level>`` being invoked for every new connection. Valid values for this
parameter are ``READ_COMMITTED``, ``READ_UNCOMMITTED``, ``REPEATABLE_READ``,
and ``SERIALIZABLE``. Note that the psycopg2 dialect does *not* use this
technique and uses psycopg2-specific APIs (see that dialect for details).
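For example, assuming a local test database (the URL here is purely
illustrative)::
    from sqlalchemy import create_engine
    engine = create_engine("postgresql://scott:tiger@localhost/test",
                           isolation_level="READ_COMMITTED")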
INSERT/UPDATE...RETURNING
-------------------------
The dialect supports PG 8.2's ``INSERT..RETURNING``, ``UPDATE..RETURNING`` and
``DELETE..RETURNING`` syntaxes. ``INSERT..RETURNING`` is used by default
for single-row INSERT statements in order to fetch newly generated
primary key identifiers. To specify an explicit ``RETURNING`` clause,
use the :meth:`._UpdateBase.returning` method on a per-statement basis::
# INSERT..RETURNING
result = table.insert().returning(table.c.col1, table.c.col2).\\
values(name='foo')
print result.fetchall()
# UPDATE..RETURNING
result = table.update().returning(table.c.col1, table.c.col2).\\
where(table.c.name=='foo').values(name='bar')
print result.fetchall()
# DELETE..RETURNING
result = table.delete().returning(table.c.col1, table.c.col2).\\
where(table.c.name=='foo')
print result.fetchall()
Indexes
-------
PostgreSQL supports partial indexes. To create them pass a postgresql_where
option to the Index constructor::
Index('my_index', my_table.c.id, postgresql_where=tbl.c.value > 10)
"""
import re
from sqlalchemy import sql, schema, exc, util
from sqlalchemy.engine import default, reflection
from sqlalchemy.sql import compiler, expression, util as sql_util
from sqlalchemy import types as sqltypes
try:
from uuid import UUID as _python_UUID
except ImportError:
_python_UUID = None
from sqlalchemy.types import INTEGER, BIGINT, SMALLINT, VARCHAR, \
CHAR, TEXT, FLOAT, NUMERIC, \
DATE, BOOLEAN, REAL
RESERVED_WORDS = set(
["all", "analyse", "analyze", "and", "any", "array", "as", "asc",
"asymmetric", "both", "case", "cast", "check", "collate", "column",
"constraint", "create", "current_catalog", "current_date",
"current_role", "current_time", "current_timestamp", "current_user",
"default", "deferrable", "desc", "distinct", "do", "else", "end",
"except", "false", "fetch", "for", "foreign", "from", "grant", "group",
"having", "in", "initially", "intersect", "into", "leading", "limit",
"localtime", "localtimestamp", "new", "not", "null", "off", "offset",
"old", "on", "only", "or", "order", "placing", "primary", "references",
"returning", "select", "session_user", "some", "symmetric", "table",
"then", "to", "trailing", "true", "union", "unique", "user", "using",
"variadic", "when", "where", "window", "with", "authorization",
"between", "binary", "cross", "current_schema", "freeze", "full",
"ilike", "inner", "is", "isnull", "join", "left", "like", "natural",
"notnull", "outer", "over", "overlaps", "right", "similar", "verbose"
])
_DECIMAL_TYPES = (1231, 1700)
_FLOAT_TYPES = (700, 701, 1021, 1022)
_INT_TYPES = (20, 21, 23, 26, 1005, 1007, 1016)
class BYTEA(sqltypes.LargeBinary):
__visit_name__ = 'BYTEA'
class DOUBLE_PRECISION(sqltypes.Float):
__visit_name__ = 'DOUBLE_PRECISION'
class INET(sqltypes.TypeEngine):
__visit_name__ = "INET"
PGInet = INET
class CIDR(sqltypes.TypeEngine):
__visit_name__ = "CIDR"
PGCidr = CIDR
class MACADDR(sqltypes.TypeEngine):
__visit_name__ = "MACADDR"
PGMacAddr = MACADDR
class TIMESTAMP(sqltypes.TIMESTAMP):
def __init__(self, timezone=False, precision=None):
super(TIMESTAMP, self).__init__(timezone=timezone)
self.precision = precision
class TIME(sqltypes.TIME):
def __init__(self, timezone=False, precision=None):
super(TIME, self).__init__(timezone=timezone)
self.precision = precision
class INTERVAL(sqltypes.TypeEngine):
"""Postgresql INTERVAL type.
The INTERVAL type may not be supported on all DBAPIs.
It is known to work on psycopg2 and not pg8000 or zxjdbc.
"""
__visit_name__ = 'INTERVAL'
def __init__(self, precision=None):
self.precision = precision
@classmethod
def _adapt_from_generic_interval(cls, interval):
return INTERVAL(precision=interval.second_precision)
@property
def _type_affinity(self):
return sqltypes.Interval
PGInterval = INTERVAL
class BIT(sqltypes.TypeEngine):
__visit_name__ = 'BIT'
def __init__(self, length=None, varying=False):
if not varying:
# BIT without VARYING defaults to length 1
self.length = length or 1
else:
# but BIT VARYING can be unlimited-length, so no default
self.length = length
self.varying = varying
PGBit = BIT
class UUID(sqltypes.TypeEngine):
"""Postgresql UUID type.
Represents the UUID column type, interpreting
data either as natively returned by the DBAPI
or as Python uuid objects.
The UUID type may not be supported on all DBAPIs.
It is known to work on psycopg2 and not pg8000.
"""
__visit_name__ = 'UUID'
def __init__(self, as_uuid=False):
"""Construct a UUID type.
:param as_uuid=False: if True, values will be interpreted
as Python uuid objects, converting to/from string via the
DBAPI.
"""
if as_uuid and _python_UUID is None:
raise NotImplementedError(
"This version of Python does not support the native UUID type."
)
self.as_uuid = as_uuid
def bind_processor(self, dialect):
if self.as_uuid:
def process(value):
if value is not None:
value = str(value)
return value
return process
else:
return None
def result_processor(self, dialect, coltype):
if self.as_uuid:
def process(value):
if value is not None:
value = _python_UUID(value)
return value
return process
else:
return None
PGUuid = UUID
class ARRAY(sqltypes.MutableType, sqltypes.Concatenable, sqltypes.TypeEngine):
"""Postgresql ARRAY type.
Represents values as Python lists.
The ARRAY type may not be supported on all DBAPIs.
It is known to work on psycopg2 and not pg8000.
"""
__visit_name__ = 'ARRAY'
def __init__(self, item_type, mutable=False, as_tuple=False):
"""Construct an ARRAY.
E.g.::
Column('myarray', ARRAY(Integer))
Arguments are:
        :param item_type: The data type of items of this array. Note that
          dimensionality is irrelevant here, so multi-dimensional arrays like
          ``INTEGER[][]`` are constructed as ``ARRAY(Integer)``, not as
          ``ARRAY(ARRAY(Integer))`` or such; the type mapping figures out the
          dimensionality on the fly.
:param mutable=False: Specify whether lists passed to this
class should be considered mutable - this enables
"mutable types" mode in the ORM. Be sure to read the
notes for :class:`.MutableType` regarding ORM
performance implications (default changed from ``True`` in
0.7.0).
.. note:: This functionality is now superseded by the
``sqlalchemy.ext.mutable`` extension described in
:ref:`mutable_toplevel`.
:param as_tuple=False: Specify whether return results
should be converted to tuples from lists. DBAPIs such
as psycopg2 return lists by default. When tuples are
returned, the results are hashable. This flag can only
be set to ``True`` when ``mutable`` is set to
``False``. (new in 0.6.5)
"""
if isinstance(item_type, ARRAY):
raise ValueError("Do not nest ARRAY types; ARRAY(basetype) "
"handles multi-dimensional arrays of basetype")
if isinstance(item_type, type):
item_type = item_type()
self.item_type = item_type
self.mutable = mutable
if mutable and as_tuple:
raise exc.ArgumentError(
"mutable must be set to False if as_tuple is True."
)
self.as_tuple = as_tuple
def copy_value(self, value):
if value is None:
return None
elif self.mutable:
return list(value)
else:
return value
def compare_values(self, x, y):
return x == y
def is_mutable(self):
return self.mutable
def bind_processor(self, dialect):
item_proc = self.item_type.dialect_impl(dialect).bind_processor(dialect)
if item_proc:
def convert_item(item):
if isinstance(item, (list, tuple)):
return [convert_item(child) for child in item]
else:
return item_proc(item)
else:
def convert_item(item):
if isinstance(item, (list, tuple)):
return [convert_item(child) for child in item]
else:
return item
def process(value):
if value is None:
return value
return [convert_item(item) for item in value]
return process
def result_processor(self, dialect, coltype):
item_proc = self.item_type.dialect_impl(dialect).result_processor(dialect, coltype)
if item_proc:
def convert_item(item):
if isinstance(item, list):
r = [convert_item(child) for child in item]
if self.as_tuple:
r = tuple(r)
return r
else:
return item_proc(item)
else:
def convert_item(item):
if isinstance(item, list):
r = [convert_item(child) for child in item]
if self.as_tuple:
r = tuple(r)
return r
else:
return item
def process(value):
if value is None:
return value
r = [convert_item(item) for item in value]
if self.as_tuple:
r = tuple(r)
return r
return process
PGArray = ARRAY
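# A minimal usage sketch (hypothetical table/column names): an ARRAY(Integer)
# column stores Python lists; as_tuple=True returns hashable tuples instead.
def _example_array_column():
    from sqlalchemy import Table, Column, Integer, MetaData
    metadata = MetaData()
    # per the docstring above, INTEGER[][] is still just ARRAY(Integer)
    return Table('measurements', metadata,
                 Column('id', Integer, primary_key=True),
                 Column('samples', ARRAY(Integer, as_tuple=True)))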
class ENUM(sqltypes.Enum):
def create(self, bind=None, checkfirst=True):
if not bind.dialect.supports_native_enum:
return
if not checkfirst or \
not bind.dialect.has_type(bind, self.name, schema=self.schema):
bind.execute(CreateEnumType(self))
def drop(self, bind=None, checkfirst=True):
if not bind.dialect.supports_native_enum:
return
if not checkfirst or \
bind.dialect.has_type(bind, self.name, schema=self.schema):
bind.execute(DropEnumType(self))
def _on_table_create(self, event, target, bind, **kw):
self.create(bind=bind, checkfirst=True)
def _on_metadata_create(self, event, target, bind, **kw):
if self.metadata is not None:
self.create(bind=bind, checkfirst=True)
def _on_metadata_drop(self, event, target, bind, **kw):
self.drop(bind=bind, checkfirst=True)
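# A minimal usage sketch: on PostgreSQL 8.3+ the ENUM subclass above emits
# CREATE TYPE / DROP TYPE DDL. The type name and labels are hypothetical;
# ``engine`` is assumed to be a PostgreSQL-bound Engine.
def _example_enum_ddl(engine):
    mood = ENUM('sad', 'ok', 'happy', name='mood')
    mood.create(bind=engine, checkfirst=True)   # CREATE TYPE mood AS ENUM (...)
    mood.drop(bind=engine, checkfirst=True)     # DROP TYPE mood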
colspecs = {
sqltypes.Interval:INTERVAL,
sqltypes.Enum:ENUM,
}
ischema_names = {
'integer' : INTEGER,
'bigint' : BIGINT,
'smallint' : SMALLINT,
'character varying' : VARCHAR,
'character' : CHAR,
'"char"' : sqltypes.String,
'name' : sqltypes.String,
'text' : TEXT,
'numeric' : NUMERIC,
'float' : FLOAT,
'real' : REAL,
'inet': INET,
'cidr': CIDR,
'uuid': UUID,
'bit': BIT,
'bit varying': BIT,
'macaddr': MACADDR,
'double precision' : DOUBLE_PRECISION,
'timestamp' : TIMESTAMP,
'timestamp with time zone' : TIMESTAMP,
'timestamp without time zone' : TIMESTAMP,
'time with time zone' : TIME,
'time without time zone' : TIME,
'date' : DATE,
'time': TIME,
'bytea' : BYTEA,
'boolean' : BOOLEAN,
'interval':INTERVAL,
'interval year to month':INTERVAL,
'interval day to second':INTERVAL,
}
class PGCompiler(compiler.SQLCompiler):
def visit_match_op(self, binary, **kw):
return "%s @@ to_tsquery(%s)" % (
self.process(binary.left),
self.process(binary.right))
def visit_ilike_op(self, binary, **kw):
escape = binary.modifiers.get("escape", None)
return '%s ILIKE %s' % \
(self.process(binary.left), self.process(binary.right)) \
+ (escape and
(' ESCAPE ' + self.render_literal_value(escape, None))
or '')
def visit_notilike_op(self, binary, **kw):
escape = binary.modifiers.get("escape", None)
return '%s NOT ILIKE %s' % \
(self.process(binary.left), self.process(binary.right)) \
+ (escape and
(' ESCAPE ' + self.render_literal_value(escape, None))
or '')
def render_literal_value(self, value, type_):
value = super(PGCompiler, self).render_literal_value(value, type_)
# TODO: need to inspect "standard_conforming_strings"
if self.dialect._backslash_escapes:
value = value.replace('\\', '\\\\')
return value
def visit_sequence(self, seq):
return "nextval('%s')" % self.preparer.format_sequence(seq)
def limit_clause(self, select):
text = ""
if select._limit is not None:
text += " \n LIMIT " + self.process(sql.literal(select._limit))
if select._offset is not None:
if select._limit is None:
text += " \n LIMIT ALL"
text += " OFFSET " + self.process(sql.literal(select._offset))
return text
def get_select_precolumns(self, select):
if select._distinct is not False:
if select._distinct is True:
return "DISTINCT "
elif isinstance(select._distinct, (list, tuple)):
return "DISTINCT ON (" + ', '.join(
[self.process(col) for col in select._distinct]
)+ ") "
else:
return "DISTINCT ON (" + self.process(select._distinct) + ") "
else:
return ""
def for_update_clause(self, select):
if select.for_update == 'nowait':
return " FOR UPDATE NOWAIT"
else:
return super(PGCompiler, self).for_update_clause(select)
def returning_clause(self, stmt, returning_cols):
columns = [
self.process(
self.label_select_column(None, c, asfrom=False),
within_columns_clause=True,
result_map=self.result_map)
for c in expression._select_iterables(returning_cols)
]
return 'RETURNING ' + ', '.join(columns)
def visit_extract(self, extract, **kwargs):
field = self.extract_map.get(extract.field, extract.field)
if extract.expr.type:
affinity = extract.expr.type._type_affinity
else:
affinity = None
casts = {
sqltypes.Date:'date',
sqltypes.DateTime:'timestamp',
sqltypes.Interval:'interval', sqltypes.Time:'time'
}
cast = casts.get(affinity, None)
if isinstance(extract.expr, sql.ColumnElement) and cast is not None:
expr = extract.expr.op('::')(sql.literal_column(cast))
else:
expr = extract.expr
return "EXTRACT(%s FROM %s)" % (
field, self.process(expr))
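# A minimal sketch of what get_select_precolumns above renders: passing
# column expressions to Select.distinct() compiles to PostgreSQL's
# DISTINCT ON. Table/column names are hypothetical; assumes SQLAlchemy core.
def _example_distinct_on():
    from sqlalchemy import Table, Column, Integer, MetaData, select
    t = Table('t', MetaData(), Column('a', Integer), Column('b', Integer))
    # compiles on this dialect to: SELECT DISTINCT ON (t.a) t.a, t.b FROM t
    return select([t.c.a, t.c.b]).distinct(t.c.a)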
class PGDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
colspec = self.preparer.format_column(column)
impl_type = column.type.dialect_impl(self.dialect)
if column.primary_key and \
column is column.table._autoincrement_column and \
not isinstance(impl_type, sqltypes.SmallInteger) and \
(
column.default is None or
(
isinstance(column.default, schema.Sequence) and
column.default.optional
)
):
if isinstance(impl_type, sqltypes.BigInteger):
colspec += " BIGSERIAL"
else:
colspec += " SERIAL"
else:
colspec += " " + self.dialect.type_compiler.process(column.type)
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
if not column.nullable:
colspec += " NOT NULL"
return colspec
def visit_create_enum_type(self, create):
type_ = create.element
return "CREATE TYPE %s AS ENUM (%s)" % (
self.preparer.format_type(type_),
",".join("'%s'" % e for e in type_.enums)
)
def visit_drop_enum_type(self, drop):
type_ = drop.element
return "DROP TYPE %s" % (
self.preparer.format_type(type_)
)
def visit_create_index(self, create):
preparer = self.preparer
index = create.element
text = "CREATE "
if index.unique:
text += "UNIQUE "
text += "INDEX %s ON %s (%s)" \
% (preparer.quote(
self._index_identifier(index.name), index.quote),
preparer.format_table(index.table),
', '.join([preparer.format_column(c)
for c in index.columns]))
if "postgres_where" in index.kwargs:
whereclause = index.kwargs['postgres_where']
util.warn_deprecated(
"The 'postgres_where' argument has been renamed "
"to 'postgresql_where'.")
elif 'postgresql_where' in index.kwargs:
whereclause = index.kwargs['postgresql_where']
else:
whereclause = None
if whereclause is not None:
whereclause = sql_util.expression_as_ddl(whereclause)
where_compiled = self.sql_compiler.process(whereclause)
text += " WHERE " + where_compiled
return text
class PGTypeCompiler(compiler.GenericTypeCompiler):
def visit_INET(self, type_):
return "INET"
def visit_CIDR(self, type_):
return "CIDR"
def visit_MACADDR(self, type_):
return "MACADDR"
def visit_FLOAT(self, type_):
if not type_.precision:
return "FLOAT"
else:
return "FLOAT(%(precision)s)" % {'precision': type_.precision}
def visit_DOUBLE_PRECISION(self, type_):
return "DOUBLE PRECISION"
def visit_BIGINT(self, type_):
return "BIGINT"
def visit_datetime(self, type_):
return self.visit_TIMESTAMP(type_)
def visit_enum(self, type_):
if not type_.native_enum or not self.dialect.supports_native_enum:
return super(PGTypeCompiler, self).visit_enum(type_)
else:
return self.visit_ENUM(type_)
def visit_ENUM(self, type_):
return self.dialect.identifier_preparer.format_type(type_)
def visit_TIMESTAMP(self, type_):
return "TIMESTAMP%s %s" % (
getattr(type_, 'precision', None) and "(%d)" %
type_.precision or "",
(type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE"
)
def visit_TIME(self, type_):
return "TIME%s %s" % (
getattr(type_, 'precision', None) and "(%d)" %
type_.precision or "",
(type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE"
)
def visit_INTERVAL(self, type_):
if type_.precision is not None:
return "INTERVAL(%d)" % type_.precision
else:
return "INTERVAL"
def visit_BIT(self, type_):
if type_.varying:
compiled = "BIT VARYING"
if type_.length is not None:
compiled += "(%d)" % type_.length
else:
compiled = "BIT(%d)" % type_.length
return compiled
def visit_UUID(self, type_):
return "UUID"
def visit_large_binary(self, type_):
return self.visit_BYTEA(type_)
def visit_BYTEA(self, type_):
return "BYTEA"
def visit_ARRAY(self, type_):
return self.process(type_.item_type) + '[]'
class PGIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
def _unquote_identifier(self, value):
if value[0] == self.initial_quote:
value = value[1:-1].\
replace(self.escape_to_quote, self.escape_quote)
return value
def format_type(self, type_, use_schema=True):
if not type_.name:
raise exc.ArgumentError("Postgresql ENUM type requires a name.")
name = self.quote(type_.name, type_.quote)
if not self.omit_schema and use_schema and type_.schema is not None:
name = self.quote_schema(type_.schema, type_.quote) + "." + name
return name
class PGInspector(reflection.Inspector):
def __init__(self, conn):
reflection.Inspector.__init__(self, conn)
def get_table_oid(self, table_name, schema=None):
"""Return the oid from `table_name` and `schema`."""
return self.dialect.get_table_oid(self.bind, table_name, schema,
info_cache=self.info_cache)
class CreateEnumType(schema._CreateDropBase):
__visit_name__ = "create_enum_type"
class DropEnumType(schema._CreateDropBase):
__visit_name__ = "drop_enum_type"
class PGExecutionContext(default.DefaultExecutionContext):
def fire_sequence(self, seq, type_):
return self._execute_scalar(("select nextval('%s')" % \
self.dialect.identifier_preparer.format_sequence(seq)), type_)
def get_insert_default(self, column):
if column.primary_key and column is column.table._autoincrement_column:
if column.server_default and column.server_default.has_argument:
# pre-execute passive defaults on primary key columns
return self._execute_scalar("select %s" %
column.server_default.arg, column.type)
elif (column.default is None or
(column.default.is_sequence and
column.default.optional)):
# execute the sequence associated with a SERIAL primary
# key column. for non-primary-key SERIAL, the ID is just
# generated server side.
try:
seq_name = column._postgresql_seq_name
except AttributeError:
tab = column.table.name
col = column.name
tab = tab[0:29 + max(0, (29 - len(col)))]
col = col[0:29 + max(0, (29 - len(tab)))]
column._postgresql_seq_name = seq_name = "%s_%s_seq" % (tab, col)
sch = column.table.schema
if sch is not None:
exc = "select nextval('\"%s\".\"%s\"')" % \
(sch, seq_name)
else:
exc = "select nextval('\"%s\"')" % \
(seq_name, )
return self._execute_scalar(exc, column.type)
return super(PGExecutionContext, self).get_insert_default(column)
class PGDialect(default.DefaultDialect):
name = 'postgresql'
supports_alter = True
max_identifier_length = 63
supports_sane_rowcount = True
supports_native_enum = True
supports_native_boolean = True
supports_sequences = True
sequences_optional = True
preexecute_autoincrement_sequences = True
postfetch_lastrowid = False
supports_default_values = True
supports_empty_insert = False
default_paramstyle = 'pyformat'
ischema_names = ischema_names
colspecs = colspecs
statement_compiler = PGCompiler
ddl_compiler = PGDDLCompiler
type_compiler = PGTypeCompiler
preparer = PGIdentifierPreparer
execution_ctx_cls = PGExecutionContext
inspector = PGInspector
isolation_level = None
# TODO: need to inspect "standard_conforming_strings"
_backslash_escapes = True
def __init__(self, isolation_level=None, **kwargs):
default.DefaultDialect.__init__(self, **kwargs)
self.isolation_level = isolation_level
def initialize(self, connection):
super(PGDialect, self).initialize(connection)
self.implicit_returning = self.server_version_info > (8, 2) and \
self.__dict__.get('implicit_returning', True)
self.supports_native_enum = self.server_version_info >= (8, 3)
if not self.supports_native_enum:
self.colspecs = self.colspecs.copy()
# pop base Enum type
self.colspecs.pop(sqltypes.Enum, None)
# psycopg2, others may have placed ENUM here as well
self.colspecs.pop(ENUM, None)
def on_connect(self):
if self.isolation_level is not None:
def connect(conn):
self.set_isolation_level(conn, self.isolation_level)
return connect
else:
return None
_isolation_lookup = set(['SERIALIZABLE',
'READ UNCOMMITTED', 'READ COMMITTED', 'REPEATABLE READ'])
def set_isolation_level(self, connection, level):
level = level.replace('_', ' ')
if level not in self._isolation_lookup:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s" %
(level, self.name, ", ".join(self._isolation_lookup))
)
cursor = connection.cursor()
cursor.execute(
"SET SESSION CHARACTERISTICS AS TRANSACTION "
"ISOLATION LEVEL %s" % level)
cursor.execute("COMMIT")
cursor.close()
def get_isolation_level(self, connection):
cursor = connection.cursor()
cursor.execute('show transaction isolation level')
val = cursor.fetchone()[0]
cursor.close()
return val.upper()
def do_begin_twophase(self, connection, xid):
self.do_begin(connection.connection)
def do_prepare_twophase(self, connection, xid):
connection.execute("PREPARE TRANSACTION '%s'" % xid)
def do_rollback_twophase(self, connection, xid,
is_prepared=True, recover=False):
if is_prepared:
if recover:
#FIXME: ugly hack to get out of transaction
# context when committing recoverable transactions.
# Must find a way to make the DBAPI not
# open a transaction.
connection.execute("ROLLBACK")
connection.execute("ROLLBACK PREPARED '%s'" % xid)
connection.execute("BEGIN")
self.do_rollback(connection.connection)
else:
self.do_rollback(connection.connection)
def do_commit_twophase(self, connection, xid,
is_prepared=True, recover=False):
if is_prepared:
if recover:
connection.execute("ROLLBACK")
connection.execute("COMMIT PREPARED '%s'" % xid)
connection.execute("BEGIN")
self.do_rollback(connection.connection)
else:
self.do_commit(connection.connection)
def do_recover_twophase(self, connection):
resultset = connection.execute(
sql.text("SELECT gid FROM pg_prepared_xacts"))
return [row[0] for row in resultset]
def _get_default_schema_name(self, connection):
return connection.scalar("select current_schema()")
def has_table(self, connection, table_name, schema=None):
# seems like case gets folded in pg_class...
if schema is None:
cursor = connection.execute(
sql.text(
"select relname from pg_class c join pg_namespace n on "
"n.oid=c.relnamespace where n.nspname=current_schema() and "
"lower(relname)=:name",
bindparams=[
sql.bindparam('name', unicode(table_name.lower()),
type_=sqltypes.Unicode)]
)
)
else:
cursor = connection.execute(
sql.text(
"select relname from pg_class c join pg_namespace n on "
"n.oid=c.relnamespace where n.nspname=:schema and "
"lower(relname)=:name",
bindparams=[
sql.bindparam('name',
unicode(table_name.lower()), type_=sqltypes.Unicode),
sql.bindparam('schema',
unicode(schema), type_=sqltypes.Unicode)]
)
)
return bool(cursor.first())
def has_sequence(self, connection, sequence_name, schema=None):
if schema is None:
cursor = connection.execute(
sql.text(
"SELECT relname FROM pg_class c join pg_namespace n on "
"n.oid=c.relnamespace where relkind='S' and "
"n.nspname=current_schema() "
"and lower(relname)=:name",
bindparams=[
sql.bindparam('name', unicode(sequence_name.lower()),
type_=sqltypes.Unicode)
]
)
)
else:
cursor = connection.execute(
sql.text(
"SELECT relname FROM pg_class c join pg_namespace n on "
"n.oid=c.relnamespace where relkind='S' and "
"n.nspname=:schema and lower(relname)=:name",
bindparams=[
sql.bindparam('name', unicode(sequence_name.lower()),
type_=sqltypes.Unicode),
sql.bindparam('schema',
unicode(schema), type_=sqltypes.Unicode)
]
)
)
return bool(cursor.first())
def has_type(self, connection, type_name, schema=None):
bindparams = [
sql.bindparam('typname',
unicode(type_name), type_=sqltypes.Unicode),
sql.bindparam('nspname',
unicode(schema), type_=sqltypes.Unicode),
]
if schema is not None:
query = """
SELECT EXISTS (
SELECT * FROM pg_catalog.pg_type t, pg_catalog.pg_namespace n
WHERE t.typnamespace = n.oid
AND t.typname = :typname
AND n.nspname = :nspname
)
"""
else:
query = """
SELECT EXISTS (
SELECT * FROM pg_catalog.pg_type t
WHERE t.typname = :typname
AND pg_type_is_visible(t.oid)
)
"""
cursor = connection.execute(sql.text(query, bindparams=bindparams))
return bool(cursor.scalar())
def _get_server_version_info(self, connection):
v = connection.execute("select version()").scalar()
m = re.match(r'PostgreSQL (\d+)\.(\d+)(?:\.(\d+))?(?:devel)?', v)
if not m:
raise AssertionError(
"Could not determine version from string '%s'" % v)
return tuple([int(x) for x in m.group(1, 2, 3) if x is not None])
@reflection.cache
def get_table_oid(self, connection, table_name, schema=None, **kw):
"""Fetch the oid for schema.table_name.
Several reflection methods require the table oid. This method
allows the oid to be fetched once and cached for
subsequent calls.
"""
table_oid = None
if schema is not None:
schema_where_clause = "n.nspname = :schema"
else:
schema_where_clause = "pg_catalog.pg_table_is_visible(c.oid)"
query = """
SELECT c.oid
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE (%s)
AND c.relname = :table_name AND c.relkind in ('r','v')
""" % schema_where_clause
# Since we're binding to unicode, table_name and schema_name must be
# unicode.
table_name = unicode(table_name)
if schema is not None:
schema = unicode(schema)
s = sql.text(query, bindparams=[
sql.bindparam('table_name', type_=sqltypes.Unicode),
sql.bindparam('schema', type_=sqltypes.Unicode)
],
typemap={'oid':sqltypes.Integer}
)
c = connection.execute(s, table_name=table_name, schema=schema)
table_oid = c.scalar()
if table_oid is None:
raise exc.NoSuchTableError(table_name)
return table_oid
@reflection.cache
def get_schema_names(self, connection, **kw):
s = """
SELECT nspname
FROM pg_namespace
ORDER BY nspname
"""
rp = connection.execute(s)
# what about system tables?
# Py3K
#schema_names = [row[0] for row in rp \
# if not row[0].startswith('pg_')]
# Py2K
schema_names = [row[0].decode(self.encoding) for row in rp \
if not row[0].startswith('pg_')]
# end Py2K
return schema_names
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
if schema is not None:
current_schema = schema
else:
current_schema = self.default_schema_name
result = connection.execute(
sql.text(u"SELECT relname FROM pg_class c "
"WHERE relkind = 'r' "
"AND '%s' = (select nspname from pg_namespace n "
"where n.oid = c.relnamespace) " %
current_schema,
typemap = {'relname':sqltypes.Unicode}
)
)
return [row[0] for row in result]
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
if schema is not None:
current_schema = schema
else:
current_schema = self.default_schema_name
s = """
SELECT relname
FROM pg_class c
WHERE relkind = 'v'
AND '%(schema)s' = (select nspname from pg_namespace n
where n.oid = c.relnamespace)
""" % dict(schema=current_schema)
# Py3K
#view_names = [row[0] for row in connection.execute(s)]
# Py2K
view_names = [row[0].decode(self.encoding)
for row in connection.execute(s)]
# end Py2K
return view_names
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
if schema is not None:
current_schema = schema
else:
current_schema = self.default_schema_name
s = """
SELECT definition FROM pg_views
WHERE schemaname = :schema
AND viewname = :view_name
"""
rp = connection.execute(sql.text(s),
view_name=view_name, schema=current_schema)
if rp:
# Py3K
#view_def = rp.scalar()
# Py2K
view_def = rp.scalar().decode(self.encoding)
# end Py2K
return view_def
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
table_oid = self.get_table_oid(connection, table_name, schema,
info_cache=kw.get('info_cache'))
SQL_COLS = """
SELECT a.attname,
pg_catalog.format_type(a.atttypid, a.atttypmod),
(SELECT substring(pg_catalog.pg_get_expr(d.adbin, d.adrelid)
for 128)
FROM pg_catalog.pg_attrdef d
WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum
AND a.atthasdef)
AS DEFAULT,
a.attnotnull, a.attnum, a.attrelid as table_oid
FROM pg_catalog.pg_attribute a
WHERE a.attrelid = :table_oid
AND a.attnum > 0 AND NOT a.attisdropped
ORDER BY a.attnum
"""
s = sql.text(SQL_COLS,
bindparams=[sql.bindparam('table_oid', type_=sqltypes.Integer)],
typemap={'attname':sqltypes.Unicode, 'default':sqltypes.Unicode}
)
c = connection.execute(s, table_oid=table_oid)
rows = c.fetchall()
domains = self._load_domains(connection)
enums = self._load_enums(connection)
# format columns
columns = []
for name, format_type, default, notnull, attnum, table_oid in rows:
## strip (5) from character varying(5), timestamp(5)
# with time zone, etc
attype = re.sub(r'\([\d,]+\)', '', format_type)
# strip '[]' from integer[], etc.
attype = re.sub(r'\[\]', '', attype)
nullable = not notnull
is_array = format_type.endswith('[]')
charlen = re.search(r'\(([\d,]+)\)', format_type)
if charlen:
charlen = charlen.group(1)
kwargs = {}
args = None
if attype == 'numeric':
if charlen:
prec, scale = charlen.split(',')
args = (int(prec), int(scale))
else:
args = ()
elif attype == 'double precision':
args = (53, )
elif attype == 'integer':
args = ()
elif attype in ('timestamp with time zone',
'time with time zone'):
kwargs['timezone'] = True
if charlen:
kwargs['precision'] = int(charlen)
args = ()
elif attype in ('timestamp without time zone',
'time without time zone', 'time'):
kwargs['timezone'] = False
if charlen:
kwargs['precision'] = int(charlen)
args = ()
elif attype == 'bit varying':
kwargs['varying'] = True
if charlen:
args = (int(charlen),)
else:
args = ()
elif attype in ('interval','interval year to month',
'interval day to second'):
if charlen:
kwargs['precision'] = int(charlen)
args = ()
elif charlen:
args = (int(charlen),)
else:
args = ()
while True:
if attype in self.ischema_names:
coltype = self.ischema_names[attype]
break
elif attype in enums:
enum = enums[attype]
coltype = ENUM
if "." in attype:
kwargs['schema'], kwargs['name'] = attype.split('.')
else:
kwargs['name'] = attype
args = tuple(enum['labels'])
break
elif attype in domains:
domain = domains[attype]
attype = domain['attype']
# A table can't override whether the domain is nullable.
nullable = domain['nullable']
if domain['default'] and not default:
# It can, however, override the default
# value, but can't set it to null.
default = domain['default']
continue
else:
coltype = None
break
if coltype:
coltype = coltype(*args, **kwargs)
if is_array:
coltype = ARRAY(coltype)
else:
util.warn("Did not recognize type '%s' of column '%s'" %
(attype, name))
coltype = sqltypes.NULLTYPE
# adjust the default value
autoincrement = False
if default is not None:
match = re.search(r"""(nextval\(')([^']+)('.*$)""", default)
if match is not None:
autoincrement = True
# the default is related to a Sequence
sch = schema
if '.' not in match.group(2) and sch is not None:
# unconditionally quote the schema name. this could
# later be enhanced to obey quoting rules /
# "quote schema"
default = match.group(1) + \
('"%s"' % sch) + '.' + \
match.group(2) + match.group(3)
column_info = dict(name=name, type=coltype, nullable=nullable,
default=default, autoincrement=autoincrement)
columns.append(column_info)
return columns
@reflection.cache
def get_primary_keys(self, connection, table_name, schema=None, **kw):
table_oid = self.get_table_oid(connection, table_name, schema,
info_cache=kw.get('info_cache'))
PK_SQL = """
SELECT attname FROM pg_attribute
WHERE attrelid = (
SELECT indexrelid FROM pg_index i
WHERE i.indrelid = :table_oid
AND i.indisprimary = 't')
ORDER BY attnum
"""
t = sql.text(PK_SQL, typemap={'attname':sqltypes.Unicode})
c = connection.execute(t, table_oid=table_oid)
primary_keys = [r[0] for r in c.fetchall()]
return primary_keys
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
cols = self.get_primary_keys(connection, table_name,
schema=schema, **kw)
table_oid = self.get_table_oid(connection, table_name, schema,
info_cache=kw.get('info_cache'))
PK_CONS_SQL = """
SELECT conname
FROM pg_catalog.pg_constraint r
WHERE r.conrelid = :table_oid AND r.contype = 'p'
ORDER BY 1
"""
t = sql.text(PK_CONS_SQL, typemap={'conname':sqltypes.Unicode})
c = connection.execute(t, table_oid=table_oid)
name = c.scalar()
return {
'constrained_columns':cols,
'name':name
}
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
preparer = self.identifier_preparer
table_oid = self.get_table_oid(connection, table_name, schema,
info_cache=kw.get('info_cache'))
FK_SQL = """
SELECT conname, pg_catalog.pg_get_constraintdef(oid, true) as condef
FROM pg_catalog.pg_constraint r
WHERE r.conrelid = :table AND r.contype = 'f'
ORDER BY 1
"""
t = sql.text(FK_SQL, typemap={
'conname':sqltypes.Unicode,
'condef':sqltypes.Unicode})
c = connection.execute(t, table=table_oid)
fkeys = []
for conname, condef in c.fetchall():
m = re.search(r'FOREIGN KEY \((.*?)\) REFERENCES '
r'(?:(.*?)\.)?(.*?)\((.*?)\)', condef).groups()
constrained_columns, referred_schema, \
referred_table, referred_columns = m
constrained_columns = [preparer._unquote_identifier(x)
for x in re.split(r'\s*,\s*', constrained_columns)]
if referred_schema:
referred_schema =\
preparer._unquote_identifier(referred_schema)
elif schema is not None and schema == self.default_schema_name:
# no schema (i.e. it's the default schema), and the table we're
# reflecting has the default schema explicit, then use that.
# i.e. try to use the user's conventions
referred_schema = schema
referred_table = preparer._unquote_identifier(referred_table)
referred_columns = [preparer._unquote_identifier(x)
for x in re.split(r'\s*,\s*', referred_columns)]
fkey_d = {
'name' : conname,
'constrained_columns' : constrained_columns,
'referred_schema' : referred_schema,
'referred_table' : referred_table,
'referred_columns' : referred_columns
}
fkeys.append(fkey_d)
return fkeys
@reflection.cache
def get_indexes(self, connection, table_name, schema, **kw):
table_oid = self.get_table_oid(connection, table_name, schema,
info_cache=kw.get('info_cache'))
IDX_SQL = """
SELECT
i.relname as relname,
ix.indisunique, ix.indexprs, ix.indpred,
a.attname
FROM
pg_class t
join pg_index ix on t.oid = ix.indrelid
join pg_class i on i.oid=ix.indexrelid
left outer join
pg_attribute a
on t.oid=a.attrelid and a.attnum=ANY(ix.indkey)
WHERE
t.relkind = 'r'
and t.oid = :table_oid
and ix.indisprimary = 'f'
ORDER BY
t.relname,
i.relname
"""
t = sql.text(IDX_SQL, typemap={'attname':sqltypes.Unicode})
c = connection.execute(t, table_oid=table_oid)
index_names = {}
indexes = []
sv_idx_name = None
for row in c.fetchall():
idx_name, unique, expr, prd, col = row
if expr:
if idx_name != sv_idx_name:
util.warn(
"Skipped unsupported reflection of "
"expression-based index %s"
% idx_name)
sv_idx_name = idx_name
continue
if prd and not idx_name == sv_idx_name:
util.warn(
"Predicate of partial index %s ignored during reflection"
% idx_name)
sv_idx_name = idx_name
if idx_name in index_names:
index_d = index_names[idx_name]
else:
index_d = {'column_names':[]}
indexes.append(index_d)
index_names[idx_name] = index_d
index_d['name'] = idx_name
if col is not None:
index_d['column_names'].append(col)
index_d['unique'] = unique
return indexes
def _load_enums(self, connection):
if not self.supports_native_enum:
return {}
## Load data types for enums:
SQL_ENUMS = """
SELECT t.typname as "name",
-- no enum defaults in 8.4 at least
-- t.typdefault as "default",
pg_catalog.pg_type_is_visible(t.oid) as "visible",
n.nspname as "schema",
e.enumlabel as "label"
FROM pg_catalog.pg_type t
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
LEFT JOIN pg_catalog.pg_constraint r ON t.oid = r.contypid
LEFT JOIN pg_catalog.pg_enum e ON t.oid = e.enumtypid
WHERE t.typtype = 'e'
ORDER BY "name", e.oid -- e.oid gives us label order
"""
s = sql.text(SQL_ENUMS, typemap={
'attname':sqltypes.Unicode,
'label':sqltypes.Unicode})
c = connection.execute(s)
enums = {}
for enum in c.fetchall():
if enum['visible']:
# 'visible' just means whether or not the enum is in a
# schema that's on the search path -- or not overridden by
# a schema with higher precedence. If it's not visible,
# it will be prefixed with the schema-name when it's used.
name = enum['name']
else:
name = "%s.%s" % (enum['schema'], enum['name'])
if name in enums:
enums[name]['labels'].append(enum['label'])
else:
enums[name] = {
'labels': [enum['label']],
}
return enums
def _load_domains(self, connection):
## Load data types for domains:
SQL_DOMAINS = """
SELECT t.typname as "name",
pg_catalog.format_type(t.typbasetype, t.typtypmod) as "attype",
not t.typnotnull as "nullable",
t.typdefault as "default",
pg_catalog.pg_type_is_visible(t.oid) as "visible",
n.nspname as "schema"
FROM pg_catalog.pg_type t
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
LEFT JOIN pg_catalog.pg_constraint r ON t.oid = r.contypid
WHERE t.typtype = 'd'
"""
s = sql.text(SQL_DOMAINS, typemap={'attname':sqltypes.Unicode})
c = connection.execute(s)
domains = {}
for domain in c.fetchall():
## strip (30) from character varying(30)
attype = re.search(r'([^\(]+)', domain['attype']).group(1)
if domain['visible']:
# 'visible' just means whether or not the domain is in a
# schema that's on the search path -- or not overridden by
# a schema with higher precedence. If it's not visible,
# it will be prefixed with the schema-name when it's used.
name = domain['name']
else:
name = "%s.%s" % (domain['schema'], domain['name'])
domains[name] = {
'attype':attype,
'nullable': domain['nullable'],
'default': domain['default']
}
return domains
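# A minimal reflection sketch (table name hypothetical): the Inspector
# front-end, using the ``reflection`` module imported at the top of this
# file, drives the get_columns()/get_indexes() methods defined above.
def _example_reflect(engine):
    insp = reflection.Inspector.from_engine(engine)
    return insp.get_columns('my_table'), insp.get_indexes('my_table')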
|
gpl-2.0
|
shssoichiro/servo
|
tests/wpt/css-tests/tools/pytest/_pytest/cacheprovider.py
|
188
|
8939
|
"""
merged implementation of the cache provider
the name cache was not chosen to ensure pluggy automatically
ignores the external pytest-cache
"""
import py
import pytest
import json
from os.path import sep as _sep, altsep as _altsep
class Cache(object):
def __init__(self, config):
self.config = config
self._cachedir = config.rootdir.join(".cache")
self.trace = config.trace.root.get("cache")
if config.getvalue("cacheclear"):
self.trace("clearing cachedir")
if self._cachedir.check():
self._cachedir.remove()
self._cachedir.mkdir()
def makedir(self, name):
""" return a directory path object with the given name. If the
directory does not yet exist, it will be created. You can use it
to manage files likes e. g. store/retrieve database
dumps across test sessions.
:param name: must be a string not containing a ``/`` separator.
Make sure the name contains your plugin or application
identifiers to prevent clashes with other cache users.
"""
if _sep in name or _altsep is not None and _altsep in name:
raise ValueError("name is not allowed to contain path separators")
return self._cachedir.ensure_dir("d", name)
def _getvaluepath(self, key):
return self._cachedir.join('v', *key.split('/'))
def get(self, key, default):
""" return cached value for the given key. If no value
has been cached yet or the value cannot be read, the specified
default is returned.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param default: must be provided in case of a cache-miss or
invalid cache values.
"""
path = self._getvaluepath(key)
if path.check():
try:
with path.open("r") as f:
return json.load(f)
except ValueError:
self.trace("cache-invalid at %s" % (path,))
return default
def set(self, key, value):
""" save value for the given key.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param value: must be of any combination of basic
python types, including nested types
like e.g. lists of dictionaries.
"""
path = self._getvaluepath(key)
try:
path.dirpath().ensure_dir()
except (py.error.EEXIST, py.error.EACCES):
self.config.warn(
code='I9', message='could not create cache path %s' % (path,)
)
return
try:
f = path.open('w')
except py.error.ENOTDIR:
self.config.warn(
code='I9', message='cache could not write path %s' % (path,))
else:
with f:
self.trace("cache-write %s: %r" % (key, value,))
json.dump(value, f, indent=2, sort_keys=True)
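# A minimal usage sketch (key name hypothetical): any plugin can persist
# small JSON-serializable values across test runs through config.cache,
# which pytest_configure below attaches to the config object.
def _example_cache_roundtrip(config):
    run_count = config.cache.get("example/run_count", 0)
    config.cache.set("example/run_count", run_count + 1)
    return run_count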
class LFPlugin:
""" Plugin which implements the --lf (run last-failing) option """
def __init__(self, config):
self.config = config
active_keys = 'lf', 'failedfirst'
self.active = any(config.getvalue(key) for key in active_keys)
if self.active:
self.lastfailed = config.cache.get("cache/lastfailed", {})
else:
self.lastfailed = {}
def pytest_report_header(self):
if self.active:
if not self.lastfailed:
mode = "run all (no recorded failures)"
else:
mode = "rerun last %d failures%s" % (
len(self.lastfailed),
" first" if self.config.getvalue("failedfirst") else "")
return "run-last-failure: %s" % mode
def pytest_runtest_logreport(self, report):
if report.failed and "xfail" not in report.keywords:
self.lastfailed[report.nodeid] = True
elif not report.failed:
if report.when == "call":
self.lastfailed.pop(report.nodeid, None)
def pytest_collectreport(self, report):
passed = report.outcome in ('passed', 'skipped')
if passed:
if report.nodeid in self.lastfailed:
self.lastfailed.pop(report.nodeid)
self.lastfailed.update(
(item.nodeid, True)
for item in report.result)
else:
self.lastfailed[report.nodeid] = True
def pytest_collection_modifyitems(self, session, config, items):
if self.active and self.lastfailed:
previously_failed = []
previously_passed = []
for item in items:
if item.nodeid in self.lastfailed:
previously_failed.append(item)
else:
previously_passed.append(item)
if not previously_failed and previously_passed:
# running a subset of all tests with recorded failures outside
# of the set of tests currently executing
pass
elif self.config.getvalue("failedfirst"):
items[:] = previously_failed + previously_passed
else:
items[:] = previously_failed
config.hook.pytest_deselected(items=previously_passed)
def pytest_sessionfinish(self, session):
config = self.config
if config.getvalue("cacheshow") or hasattr(config, "slaveinput"):
return
prev_failed = config.cache.get("cache/lastfailed", None) is not None
if (session.testscollected and prev_failed) or self.lastfailed:
config.cache.set("cache/lastfailed", self.lastfailed)
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption(
'--lf', '--last-failed', action='store_true', dest="lf",
help="rerun only the tests that failed "
"at the last run (or all if none failed)")
group.addoption(
'--ff', '--failed-first', action='store_true', dest="failedfirst",
help="run all tests but run the last failures first. "
"This may re-order tests and thus lead to "
"repeated fixture setup/teardown")
group.addoption(
'--cache-show', action='store_true', dest="cacheshow",
help="show cache contents, don't perform collection or tests")
group.addoption(
'--cache-clear', action='store_true', dest="cacheclear",
help="remove all cache contents at start of test run.")
def pytest_cmdline_main(config):
if config.option.cacheshow:
from _pytest.main import wrap_session
return wrap_session(config, cacheshow)
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
config.cache = Cache(config)
config.pluginmanager.register(LFPlugin(config), "lfplugin")
@pytest.fixture
def cache(request):
"""
Return a cache object that can persist state between testing sessions.
cache.get(key, default)
cache.set(key, value)
Keys must be a ``/`` separated value, where the first part is usually the
name of your plugin or application to avoid clashes with other cache users.
Values can be any object handled by the json stdlib module.
"""
return request.config.cache
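# A minimal sketch of the fixture above in use (key name hypothetical):
# a value stored here is visible again in the next test session.
def _example_cached_computation(cache):
    value = cache.get("example/expensive", None)
    if value is None:
        value = 42  # stand-in for an expensive computation
        cache.set("example/expensive", value)
    return value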
def pytest_report_header(config):
if config.option.verbose:
relpath = py.path.local().bestrelpath(config.cache._cachedir)
return "cachedir: %s" % relpath
def cacheshow(config, session):
from pprint import pprint
tw = py.io.TerminalWriter()
tw.line("cachedir: " + str(config.cache._cachedir))
if not config.cache._cachedir.check():
tw.line("cache is empty")
return 0
dummy = object()
basedir = config.cache._cachedir
vdir = basedir.join("v")
tw.sep("-", "cache values")
for valpath in vdir.visit(lambda x: x.isfile()):
key = valpath.relto(vdir).replace(valpath.sep, "/")
val = config.cache.get(key, dummy)
if val is dummy:
tw.line("%s contains unreadable content, "
"will be ignored" % key)
else:
tw.line("%s contains:" % key)
stream = py.io.TextIO()
pprint(val, stream=stream)
for line in stream.getvalue().splitlines():
tw.line(" " + line)
ddir = basedir.join("d")
if ddir.isdir() and ddir.listdir():
tw.sep("-", "cache directories")
for p in basedir.join("d").visit():
#if p.check(dir=1):
# print("%s/" % p.relto(basedir))
if p.isfile():
key = p.relto(basedir)
tw.line("%s is a file of length %d" % (
key, p.size()))
return 0
|
mpl-2.0
|
krafczyk/spack
|
var/spack/repos/builtin/packages/font-bh-type1/package.py
|
5
|
2082
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class FontBhType1(Package):
"""X.org bh-type1 font."""
homepage = "http://cgit.freedesktop.org/xorg/font/bh-type1"
url = "https://www.x.org/archive/individual/font/font-bh-type1-1.0.3.tar.gz"
version('1.0.3', '62d4e8f782a6a0658784072a5df5ac98')
depends_on('font-util')
depends_on('fontconfig', type='build')
depends_on('mkfontdir', type='build')
depends_on('mkfontscale', type='build')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
def install(self, spec, prefix):
configure('--prefix={0}'.format(prefix))
make('install')
# `make install` copies the files to the font-util installation.
# Create a fake directory to convince Spack that we actually
# installed something.
mkdir(prefix.lib)
|
lgpl-2.1
|
ddayguerrero/blogme
|
flask/lib/python3.4/site-packages/werkzeug/contrib/atom.py
|
259
|
15588
|
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.atom
~~~~~~~~~~~~~~~~~~~~~
This module provides a class called :class:`AtomFeed` which can be
used to generate feeds in the Atom syndication format (see :rfc:`4287`).
Example::
def atom_feed(request):
feed = AtomFeed("My Blog", feed_url=request.url,
url=request.host_url,
subtitle="My example blog for a feed test.")
for post in Post.query.limit(10).all():
feed.add(post.title, post.body, content_type='html',
author=post.author, url=post.url, id=post.uid,
updated=post.last_update, published=post.pub_date)
return feed.get_response()
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from datetime import datetime
from werkzeug.utils import escape
from werkzeug.wrappers import BaseResponse
from werkzeug._compat import implements_to_string, string_types
XHTML_NAMESPACE = 'http://www.w3.org/1999/xhtml'
def _make_text_block(name, content, content_type=None):
"""Helper function for the builder that creates an XML text block."""
if content_type == 'xhtml':
return u'<%s type="xhtml"><div xmlns="%s">%s</div></%s>\n' % \
(name, XHTML_NAMESPACE, content, name)
if not content_type:
return u'<%s>%s</%s>\n' % (name, escape(content), name)
return u'<%s type="%s">%s</%s>\n' % (name, content_type,
escape(content), name)
def format_iso8601(obj):
"""Format a datetime object for iso8601"""
iso8601 = obj.isoformat()
if obj.tzinfo:
return iso8601
return iso8601 + 'Z'
@implements_to_string
class AtomFeed(object):
"""A helper class that creates Atom feeds.
:param title: the title of the feed. Required.
:param title_type: the type attribute for the title element. One of
``'html'``, ``'text'`` or ``'xhtml'``.
:param url: the url for the feed (not the url *of* the feed)
:param id: a globally unique id for the feed. Must be a URI. If
not present the `feed_url` is used, but one of the two is
required.
:param updated: the time the feed was last modified. Must
be a :class:`datetime.datetime` object. If not
present the latest entry's `updated` is used.
Treated as UTC if naive datetime.
:param feed_url: the URL to the feed. Should be the URL that was
requested.
:param author: the author of the feed. Must be either a string (the
name) or a dict with name (required) and uri or
email (both optional). Can also be a list of such
strings and dicts (freely mixed) if there are
multiple authors. Required if not every entry has an
author element.
:param icon: an icon for the feed.
:param logo: a logo for the feed.
:param rights: copyright information for the feed.
:param rights_type: the type attribute for the rights element. One of
``'html'``, ``'text'`` or ``'xhtml'``. Default is
``'text'``.
:param subtitle: a short description of the feed.
:param subtitle_type: the type attribute for the subtitle element.
One of ``'text'``, ``'html'``
or ``'xhtml'``. Default is ``'text'``.
:param links: additional links. Must be a list of dictionaries with
href (required) and rel, type, hreflang, title, length
(all optional)
:param generator: the software that generated this feed. This must be
a tuple in the form ``(name, url, version)``. If
you don't want to specify one of them, set the item
to `None`.
:param entries: a list with the entries for the feed. Entries can also
be added later with :meth:`add`.
For more information on the elements see
http://www.atomenabled.org/developers/syndication/
Wherever a list is demanded, any iterable can be used.
"""
default_generator = ('Werkzeug', None, None)
def __init__(self, title=None, entries=None, **kwargs):
self.title = title
self.title_type = kwargs.get('title_type', 'text')
self.url = kwargs.get('url')
self.feed_url = kwargs.get('feed_url', self.url)
self.id = kwargs.get('id', self.feed_url)
self.updated = kwargs.get('updated')
self.author = kwargs.get('author', ())
self.icon = kwargs.get('icon')
self.logo = kwargs.get('logo')
self.rights = kwargs.get('rights')
self.rights_type = kwargs.get('rights_type')
self.subtitle = kwargs.get('subtitle')
self.subtitle_type = kwargs.get('subtitle_type', 'text')
self.generator = kwargs.get('generator')
if self.generator is None:
self.generator = self.default_generator
self.links = kwargs.get('links', [])
self.entries = entries and list(entries) or []
if not hasattr(self.author, '__iter__') \
or isinstance(self.author, string_types + (dict,)):
self.author = [self.author]
for i, author in enumerate(self.author):
if not isinstance(author, dict):
self.author[i] = {'name': author}
if not self.title:
raise ValueError('title is required')
if not self.id:
raise ValueError('id is required')
for author in self.author:
if 'name' not in author:
raise TypeError('author must contain at least a name')
def add(self, *args, **kwargs):
"""Add a new entry to the feed. This function can either be called
with a :class:`FeedEntry` or some keyword and positional arguments
that are forwarded to the :class:`FeedEntry` constructor.
"""
if len(args) == 1 and not kwargs and isinstance(args[0], FeedEntry):
self.entries.append(args[0])
else:
kwargs['feed_url'] = self.feed_url
self.entries.append(FeedEntry(*args, **kwargs))
def __repr__(self):
return '<%s %r (%d entries)>' % (
self.__class__.__name__,
self.title,
len(self.entries)
)
def generate(self):
"""Return a generator that yields pieces of XML."""
# atom demands either an author element in every entry or a global one
if not self.author:
if False in map(lambda e: bool(e.author), self.entries):
self.author = ({'name': 'Unknown author'},)
if not self.updated:
dates = sorted([entry.updated for entry in self.entries])
self.updated = dates and dates[-1] or datetime.utcnow()
yield u'<?xml version="1.0" encoding="utf-8"?>\n'
yield u'<feed xmlns="http://www.w3.org/2005/Atom">\n'
yield ' ' + _make_text_block('title', self.title, self.title_type)
yield u' <id>%s</id>\n' % escape(self.id)
yield u' <updated>%s</updated>\n' % format_iso8601(self.updated)
if self.url:
yield u' <link href="%s" />\n' % escape(self.url)
if self.feed_url:
yield u' <link href="%s" rel="self" />\n' % \
escape(self.feed_url)
for link in self.links:
yield u' <link %s/>\n' % ''.join('%s="%s" ' %
(k, escape(link[k])) for k in link)
for author in self.author:
yield u' <author>\n'
yield u' <name>%s</name>\n' % escape(author['name'])
if 'uri' in author:
yield u' <uri>%s</uri>\n' % escape(author['uri'])
if 'email' in author:
yield ' <email>%s</email>\n' % escape(author['email'])
yield ' </author>\n'
if self.subtitle:
yield ' ' + _make_text_block('subtitle', self.subtitle,
self.subtitle_type)
if self.icon:
yield u' <icon>%s</icon>\n' % escape(self.icon)
if self.logo:
yield u' <logo>%s</logo>\n' % escape(self.logo)
if self.rights:
yield ' ' + _make_text_block('rights', self.rights,
self.rights_type)
generator_name, generator_url, generator_version = self.generator
if generator_name or generator_url or generator_version:
tmp = [u' <generator']
if generator_url:
tmp.append(u' uri="%s"' % escape(generator_url))
if generator_version:
tmp.append(u' version="%s"' % escape(generator_version))
tmp.append(u'>%s</generator>\n' % escape(generator_name))
yield u''.join(tmp)
for entry in self.entries:
for line in entry.generate():
yield u' ' + line
yield u'</feed>\n'
def to_string(self):
"""Convert the feed into a string."""
return u''.join(self.generate())
def get_response(self):
"""Return a response object for the feed."""
return BaseResponse(self.to_string(), mimetype='application/atom+xml')
def __call__(self, environ, start_response):
"""Use the class as WSGI response object."""
return self.get_response()(environ, start_response)
def __str__(self):
return self.to_string()
@implements_to_string
class FeedEntry(object):
"""Represents a single entry in a feed.
:param title: the title of the entry. Required.
:param title_type: the type attribute for the title element. One of
``'html'``, ``'text'`` or ``'xhtml'``.
:param content: the content of the entry.
:param content_type: the type attribute for the content element. One
of ``'html'``, ``'text'`` or ``'xhtml'``.
:param summary: a summary of the entry's content.
:param summary_type: the type attribute for the summary element. One
of ``'html'``, ``'text'`` or ``'xhtml'``.
:param url: the url for the entry.
:param id: a globally unique id for the entry. Must be a URI. If
not present the URL is used, but one of the two is required.
:param updated: the time the entry was last modified. Must
be a :class:`datetime.datetime` object. Treated as
UTC if naive datetime. Required.
:param author: the author of the entry. Must be either a string (the
name) or a dict with name (required) and uri or
email (both optional). Can also be a list of such
strings and dicts (freely mixed) if there are
multiple authors. Required if the feed does not have an
author element.
:param published: the time the entry was initially published. Must
be a :class:`datetime.datetime` object. Treated as
UTC if naive datetime.
:param rights: copyright information for the entry.
:param rights_type: the type attribute for the rights element. One of
``'html'``, ``'text'`` or ``'xhtml'``. Default is
``'text'``.
:param links: additional links. Must be a list of dictionaries with
href (required) and rel, type, hreflang, title, length
(all optional)
:param categories: categories for the entry. Must be a list of dictionaries
with term (required), scheme and label (all optional)
:param xml_base: The xml base (url) for this feed item. If not provided
it will default to the item url.
For more information on the elements see
http://www.atomenabled.org/developers/syndication/
Wherever a list is demanded, any iterable can be used.
"""
def __init__(self, title=None, content=None, feed_url=None, **kwargs):
self.title = title
self.title_type = kwargs.get('title_type', 'text')
self.content = content
self.content_type = kwargs.get('content_type', 'html')
self.url = kwargs.get('url')
self.id = kwargs.get('id', self.url)
self.updated = kwargs.get('updated')
self.summary = kwargs.get('summary')
self.summary_type = kwargs.get('summary_type', 'html')
self.author = kwargs.get('author', ())
self.published = kwargs.get('published')
self.rights = kwargs.get('rights')
self.links = kwargs.get('links', [])
self.categories = kwargs.get('categories', [])
self.xml_base = kwargs.get('xml_base', feed_url)
if not hasattr(self.author, '__iter__') \
or isinstance(self.author, string_types + (dict,)):
self.author = [self.author]
for i, author in enumerate(self.author):
if not isinstance(author, dict):
self.author[i] = {'name': author}
if not self.title:
raise ValueError('title is required')
if not self.id:
raise ValueError('id is required')
if not self.updated:
raise ValueError('updated is required')
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.title
)
def generate(self):
"""Yields pieces of ATOM XML."""
base = ''
if self.xml_base:
base = ' xml:base="%s"' % escape(self.xml_base)
yield u'<entry%s>\n' % base
yield u' ' + _make_text_block('title', self.title, self.title_type)
yield u' <id>%s</id>\n' % escape(self.id)
yield u' <updated>%s</updated>\n' % format_iso8601(self.updated)
if self.published:
yield u' <published>%s</published>\n' % \
format_iso8601(self.published)
if self.url:
yield u' <link href="%s" />\n' % escape(self.url)
for author in self.author:
yield u' <author>\n'
yield u' <name>%s</name>\n' % escape(author['name'])
if 'uri' in author:
yield u' <uri>%s</uri>\n' % escape(author['uri'])
if 'email' in author:
yield u' <email>%s</email>\n' % escape(author['email'])
yield u' </author>\n'
for link in self.links:
yield u' <link %s/>\n' % ''.join('%s="%s" ' %
(k, escape(link[k])) for k in link)
for category in self.categories:
yield u' <category %s/>\n' % ''.join('%s="%s" ' %
(k, escape(category[k])) for k in category)
if self.summary:
yield u' ' + _make_text_block('summary', self.summary,
self.summary_type)
if self.content:
yield u' ' + _make_text_block('content', self.content,
self.content_type)
yield u'</entry>\n'
def to_string(self):
"""Convert the feed item into a unicode object."""
return u''.join(self.generate())
def __str__(self):
return self.to_string()
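# A minimal standalone sketch (all names and URLs hypothetical): building a
# feed without a request object and rendering it to a string.
def _example_feed():
    feed = AtomFeed('Example Feed', feed_url='http://example.com/feed.atom',
                    url='http://example.com/', author='Jane Doe')
    feed.add('First post', 'Hello, world.', content_type='text',
             url='http://example.com/posts/1', updated=datetime.utcnow())
    return feed.to_string()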
|
mit
|
alexbredo/site-packages
|
mail.py
|
1
|
2015
|
# Copyright (c) 2014 Alexander Bredo
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import smtplib
from email.mime.text import MIMEText
from bredo.logger import *
class Mail(object):
def __init__(self, smtpserver, sender):
self.log = Logger('kippo-alert.log')
self.smtpserver = smtpserver
self.sender = sender
def send(self, recipient, subject, message):
try:
s = smtplib.SMTP(self.smtpserver)
msg = MIMEText(message)
msg['From'] = self.sender
msg['To'] = recipient
msg['Subject'] = subject
s.sendmail(self.sender, recipient, msg.as_string())
s.quit()
self.log.info('Mail has been successfully sent.')
except Exception, e:
self.log.error('Error occurred while sending mail: ' + str(e))
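# A minimal usage sketch (host and addresses are hypothetical):
def _example_send_alert():
    mailer = Mail('smtp.example.org', 'alerts@example.org')
    mailer.send('admin@example.org', 'kippo alert',
                'New login attempt observed.')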
|
bsd-2-clause
|
matmutant/sl4a
|
python/src/Lib/lib-tk/FixTk.py
|
49
|
2844
|
import sys, os
# Delay import _tkinter until we have set TCL_LIBRARY,
# so that Tcl_FindExecutable has a chance to locate its
# encoding directory.
# Unfortunately, we cannot know the TCL_LIBRARY directory
# if we don't know the tcl version, which we cannot find out
# without importing Tcl. Fortunately, Tcl will itself look in
# <TCL_LIBRARY>\..\tcl<TCL_VERSION>, so anything close to
# the real Tcl library will do.
# Expand symbolic links on Vista
try:
import ctypes
ctypes.windll.kernel32.GetFinalPathNameByHandleW
except (ImportError, AttributeError):
def convert_path(s):
return s
else:
def convert_path(s):
if isinstance(s, str):
s = s.decode("mbcs")
hdir = ctypes.windll.kernel32.\
CreateFileW(s, 0x80, # FILE_READ_ATTRIBUTES
1, # FILE_SHARE_READ
None, 3, # OPEN_EXISTING
0x02000000, # FILE_FLAG_BACKUP_SEMANTICS
None)
if hdir == -1:
# Cannot open directory, give up
return s
buf = ctypes.create_unicode_buffer(u"", 32768)
res = ctypes.windll.kernel32.\
GetFinalPathNameByHandleW(hdir, buf, len(buf),
0) # VOLUME_NAME_DOS
ctypes.windll.kernel32.CloseHandle(hdir)
if res == 0:
# Conversion failed (e.g. network location)
return s
s = buf[:res]
# Ignore leading \\?\
if s.startswith(u"\\\\?\\"):
s = s[4:]
return s
prefix = os.path.join(sys.prefix,"tcl")
if not os.path.exists(prefix):
# devdir/../tcltk/lib
prefix = os.path.join(sys.prefix, os.path.pardir, "tcltk", "lib")
prefix = os.path.abspath(prefix)
# if this does not exist, no further search is needed
if os.path.exists(prefix):
prefix = convert_path(prefix)
if not os.environ.has_key("TCL_LIBRARY"):
for name in os.listdir(prefix):
if name.startswith("tcl"):
tcldir = os.path.join(prefix,name)
if os.path.isdir(tcldir):
os.environ["TCL_LIBRARY"] = tcldir
# Compute TK_LIBRARY, knowing that it has the same version
# as Tcl
import _tkinter
ver = str(_tkinter.TCL_VERSION)
if not os.environ.has_key("TK_LIBRARY"):
v = os.path.join(prefix, 'tk'+ver)
if os.path.exists(os.path.join(v, "tclIndex")):
os.environ['TK_LIBRARY'] = v
# We don't know the Tix version, so we must search the entire
# directory
if not os.environ.has_key("TIX_LIBRARY"):
for name in os.listdir(prefix):
if name.startswith("tix"):
tixdir = os.path.join(prefix,name)
if os.path.isdir(tixdir):
os.environ["TIX_LIBRARY"] = tixdir
|
apache-2.0
|
txm/potato
|
django/conf/locale/en_GB/formats.py
|
234
|
2048
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y' # 'Oct. 25, 2006'
TIME_FORMAT = 'P' # '2:30 pm'
DATETIME_FORMAT = 'N j, Y, P' # 'Oct. 25, 2006, 2:30 pm'
YEAR_MONTH_FORMAT = 'F Y' # 'October 2006'
MONTH_DAY_FORMAT = 'F j' # 'October 25'
SHORT_DATE_FORMAT = 'd/m/Y' # '25/10/2006'
SHORT_DATETIME_FORMAT = 'd/m/Y P' # '25/10/2006 2:30 pm'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
'%Y-%m-%d', # '2006-10-25'
# '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
# '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
# '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
# '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
)
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
|
bsd-3-clause
|
prarthitm/edxplatform
|
pavelib/servers.py
|
15
|
10450
|
"""
Run and manage servers for local development.
"""
from __future__ import print_function
import argparse
import sys
from paver.easy import call_task, cmdopts, consume_args, needs, sh, task
from .assets import collect_assets
from .utils.cmd import django_cmd
from .utils.process import run_process, run_multi_processes
from .utils.timer import timed
DEFAULT_PORT = {"lms": 8000, "studio": 8001}
DEFAULT_SETTINGS = 'devstack'
OPTIMIZED_SETTINGS = "devstack_optimized"
OPTIMIZED_ASSETS_SETTINGS = "test_static_optimized"
ASSET_SETTINGS_HELP = (
"Settings file used for updating assets. Defaults to the value of the settings variable if not provided."
)
def run_server(
system, fast=False, settings=None, asset_settings=None, port=None, contracts=False
):
"""Start the server for LMS or Studio.
Args:
system (str): The system to be run (lms or studio).
fast (bool): If true, then start the server immediately without updating assets (defaults to False).
settings (str): The Django settings module to use; if not provided, use the default.
        asset_settings (str): The settings to use when generating assets. If not provided, assets are not generated.
        port (str): The port number to run the server on. If not provided, uses the default port for the system.
        contracts (bool): If true, then PyContracts is enabled (defaults to False).
"""
if system not in ['lms', 'studio']:
print("System must be either lms or studio", file=sys.stderr)
exit(1)
if not settings:
settings = DEFAULT_SETTINGS
if not fast and asset_settings:
args = [system, '--settings={}'.format(asset_settings), '--watch']
# The default settings use DEBUG mode for running the server which means that
# the optimized assets are ignored, so we skip collectstatic in that case
# to save time.
if settings == DEFAULT_SETTINGS:
args.append('--skip-collect')
call_task('pavelib.assets.update_assets', args=args)
if port is None:
port = DEFAULT_PORT[system]
args = [settings, 'runserver', '--traceback', '--pythonpath=.', '0.0.0.0:{}'.format(port)]
if contracts:
args.append("--contracts")
run_process(django_cmd(system, *args))
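# A minimal sketch of what run_server ends up executing with the defaults
# above, assuming django_cmd composes the matching manage.py invocation for
# the system (see pavelib.utils.cmd for the real composition):
#
#   run_server('lms')
#   # roughly: <manage.py lms> with settings 'devstack',
#   #          runserver --traceback --pythonpath=. 0.0.0.0:8000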
@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
("settings=", "s", "Django settings"),
("asset-settings=", "a", ASSET_SETTINGS_HELP),
("port=", "p", "Port"),
("fast", "f", "Skip updating assets"),
])
def lms(options):
"""
Run the LMS server.
"""
settings = getattr(options, 'settings', DEFAULT_SETTINGS)
asset_settings = getattr(options, 'asset-settings', settings)
port = getattr(options, 'port', None)
fast = getattr(options, 'fast', False)
run_server(
'lms',
fast=fast,
settings=settings,
asset_settings=asset_settings,
port=port,
)
@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
("settings=", "s", "Django settings"),
("asset-settings=", "a", ASSET_SETTINGS_HELP),
("port=", "p", "Port"),
("fast", "f", "Skip updating assets"),
])
def studio(options):
"""
Run the Studio server.
"""
settings = getattr(options, 'settings', DEFAULT_SETTINGS)
asset_settings = getattr(options, 'asset-settings', settings)
port = getattr(options, 'port', None)
fast = getattr(options, 'fast', False)
run_server(
'studio',
fast=fast,
settings=settings,
asset_settings=asset_settings,
port=port,
)
@task
@needs('pavelib.prereqs.install_prereqs')
@consume_args
def devstack(args):
"""
    Start the devstack LMS or Studio server.
"""
parser = argparse.ArgumentParser(prog='paver devstack')
parser.add_argument('system', type=str, nargs=1, help="lms or studio")
parser.add_argument('--fast', action='store_true', default=False, help="Skip updating assets")
parser.add_argument('--optimized', action='store_true', default=False, help="Run with optimized assets")
parser.add_argument('--settings', type=str, default=DEFAULT_SETTINGS, help="Settings file")
parser.add_argument('--asset-settings', type=str, default=None, help=ASSET_SETTINGS_HELP)
parser.add_argument(
'--no-contracts',
action='store_true',
default=False,
help="Disable contracts. By default, they're enabled in devstack."
)
args = parser.parse_args(args)
settings = args.settings
asset_settings = args.asset_settings if args.asset_settings else settings
if args.optimized:
settings = OPTIMIZED_SETTINGS
asset_settings = OPTIMIZED_ASSETS_SETTINGS
sh(django_cmd('cms', settings, 'reindex_course', '--setup'))
run_server(
args.system[0],
fast=args.fast,
settings=settings,
asset_settings=asset_settings,
contracts=not args.no_contracts,
)
@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
("settings=", "s", "Django settings"),
])
def celery(options):
"""
Runs Celery workers.
"""
settings = getattr(options, 'settings', 'dev_with_worker')
run_process(django_cmd('lms', settings, 'celery', 'worker', '--beat', '--loglevel=INFO', '--pythonpath=.'))
@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
("settings=", "s", "Django settings for both LMS and Studio"),
("asset-settings=", "a", "Django settings for updating assets for both LMS and Studio (defaults to settings)"),
("worker-settings=", "w", "Celery worker Django settings"),
("fast", "f", "Skip updating assets"),
("optimized", "o", "Run with optimized assets"),
("settings-lms=", "l", "Set LMS only, overriding the value from --settings (if provided)"),
("asset-settings-lms=", None, "Set LMS only, overriding the value from --asset-settings (if provided)"),
("settings-cms=", "c", "Set Studio only, overriding the value from --settings (if provided)"),
("asset-settings-cms=", None, "Set Studio only, overriding the value from --asset-settings (if provided)"),
("asset_settings=", None, "deprecated in favor of asset-settings"),
("asset_settings_cms=", None, "deprecated in favor of asset-settings-cms"),
("asset_settings_lms=", None, "deprecated in favor of asset-settings-lms"),
("settings_cms=", None, "deprecated in favor of settings-cms"),
("settings_lms=", None, "deprecated in favor of settings-lms"),
("worker_settings=", None, "deprecated in favor of worker-settings"),
])
def run_all_servers(options):
"""
Runs Celery workers, Studio, and LMS.
"""
settings = getattr(options, 'settings', DEFAULT_SETTINGS)
asset_settings = getattr(options, 'asset_settings', settings)
worker_settings = getattr(options, 'worker_settings', 'dev_with_worker')
fast = getattr(options, 'fast', False)
optimized = getattr(options, 'optimized', False)
if optimized:
settings = OPTIMIZED_SETTINGS
asset_settings = OPTIMIZED_ASSETS_SETTINGS
settings_lms = getattr(options, 'settings_lms', settings)
settings_cms = getattr(options, 'settings_cms', settings)
asset_settings_lms = getattr(options, 'asset_settings_lms', asset_settings)
asset_settings_cms = getattr(options, 'asset_settings_cms', asset_settings)
if not fast:
# First update assets for both LMS and Studio but don't collect static yet
args = [
'lms', 'studio',
'--settings={}'.format(asset_settings),
'--skip-collect'
]
call_task('pavelib.assets.update_assets', args=args)
# Now collect static for each system separately with the appropriate settings.
# Note that the default settings use DEBUG mode for running the server which
# means that the optimized assets are ignored, so we skip collectstatic in that
# case to save time.
if settings != DEFAULT_SETTINGS:
collect_assets(['lms'], asset_settings_lms)
collect_assets(['studio'], asset_settings_cms)
# Install an asset watcher to regenerate files that change
call_task('pavelib.assets.watch_assets', options={'background': True})
# Start up LMS, CMS and Celery
lms_port = DEFAULT_PORT['lms']
cms_port = DEFAULT_PORT['studio']
lms_runserver_args = ["0.0.0.0:{}".format(lms_port)]
cms_runserver_args = ["0.0.0.0:{}".format(cms_port)]
run_multi_processes([
django_cmd(
'lms', settings_lms, 'runserver', '--traceback', '--pythonpath=.', *lms_runserver_args
),
django_cmd(
'studio', settings_cms, 'runserver', '--traceback', '--pythonpath=.', *cms_runserver_args
),
django_cmd(
'lms', worker_settings, 'celery', 'worker', '--beat', '--loglevel=INFO', '--pythonpath=.'
)
])
@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
("settings=", "s", "Django settings"),
("fake-initial", None, "Fake the initial migrations"),
])
@timed
def update_db(options):
"""
    Migrates the LMS and CMS across all databases.
"""
settings = getattr(options, 'settings', DEFAULT_SETTINGS)
fake = "--fake-initial" if getattr(options, 'fake_initial', False) else ""
for system in ('lms', 'cms'):
# pylint: disable=line-too-long
sh("NO_EDXAPP_SUDO=1 EDX_PLATFORM_SETTINGS_OVERRIDE={settings} /edx/bin/edxapp-migrate-{system} --traceback --pythonpath=. {fake}".format(
settings=settings,
system=system,
fake=fake))
@task
@needs('pavelib.prereqs.install_prereqs')
@consume_args
@timed
def check_settings(args):
"""
Checks settings files.
"""
parser = argparse.ArgumentParser(prog='paver check_settings')
parser.add_argument('system', type=str, nargs=1, help="lms or studio")
parser.add_argument('settings', type=str, nargs=1, help='Django settings')
args = parser.parse_args(args)
system = args.system[0]
settings = args.settings[0]
try:
import_cmd = "echo 'import {system}.envs.{settings}'".format(system=system, settings=settings)
django_shell_cmd = django_cmd(system, settings, 'shell', '--plain', '--pythonpath=.')
sh("{import_cmd} | {shell_cmd}".format(import_cmd=import_cmd, shell_cmd=django_shell_cmd))
except: # pylint: disable=bare-except
print("Failed to import settings", file=sys.stderr)
|
agpl-3.0
|
louyihua/edx-platform
|
common/djangoapps/contentserver/admin.py
|
27
|
1455
|
"""
Django admin page for CourseAssetCacheTtlConfig, which allows you to configure the TTL
used when sending cacheability headers back with requested course assets.
"""
from django.contrib import admin
from config_models.admin import ConfigurationModelAdmin
from .models import CourseAssetCacheTtlConfig, CdnUserAgentsConfig
class CourseAssetCacheTtlConfigAdmin(ConfigurationModelAdmin):
"""
Basic configuration for cache TTL.
"""
list_display = [
'cache_ttl'
]
def get_list_display(self, request):
"""
Restore default list_display behavior.
ConfigurationModelAdmin overrides this, but in a way that doesn't
respect the ordering. This lets us customize it the usual Django admin
way.
"""
return self.list_display
class CdnUserAgentsConfigAdmin(ConfigurationModelAdmin):
"""
Basic configuration for CDN user agent whitelist.
"""
list_display = [
'cdn_user_agents'
]
def get_list_display(self, request):
"""
Restore default list_display behavior.
ConfigurationModelAdmin overrides this, but in a way that doesn't
respect the ordering. This lets us customize it the usual Django admin
way.
"""
return self.list_display
admin.site.register(CourseAssetCacheTtlConfig, CourseAssetCacheTtlConfigAdmin)
admin.site.register(CdnUserAgentsConfig, CdnUserAgentsConfigAdmin)
|
agpl-3.0
|
ioef/tlslite-ng
|
tlslite/utils/asn1parser.py
|
206
|
1191
|
# Author: Trevor Perrin
# Patch from Google adding getChildBytes()
#
# See the LICENSE file for legal information regarding use of this file.
"""Class for parsing ASN.1"""
from .compat import *
from .codec import *
#Takes a byte array which has a DER TLV field at its head
class ASN1Parser(object):
def __init__(self, bytes):
p = Parser(bytes)
p.get(1) #skip Type
#Get Length
self.length = self._getASN1Length(p)
#Get Value
self.value = p.getFixBytes(self.length)
#Assuming this is a sequence...
def getChild(self, which):
return ASN1Parser(self.getChildBytes(which))
def getChildBytes(self, which):
p = Parser(self.value)
for x in range(which+1):
markIndex = p.index
p.get(1) #skip Type
length = self._getASN1Length(p)
p.getFixBytes(length)
return p.bytes[markIndex : p.index]
#Decode the ASN.1 DER length field
def _getASN1Length(self, p):
firstLength = p.get(1)
if firstLength<=127:
return firstLength
else:
lengthLength = firstLength & 0x7F
return p.get(lengthLength)
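# A minimal doctest-style sketch, assuming standard DER: the bytes
# 30 03 02 01 05 encode SEQUENCE { INTEGER 5 }.
#
# >>> seq = ASN1Parser(bytearray([0x30, 0x03, 0x02, 0x01, 0x05]))
# >>> seq.length
# 3
# >>> seq.getChild(0).value[0] # the INTEGER's single content byte
# 5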
|
lgpl-2.1
|
gemmaan/moviesenal
|
Hasil/Lib/site-packages/pip/_vendor/ipaddress.py
|
339
|
80176
|
# Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
This library is used to create/poke/manipulate IPv4 and IPv6 addresses
and networks.
"""
from __future__ import unicode_literals
import itertools
import struct
__version__ = '1.0.17'
# Compatibility functions
_compat_int_types = (int,)
try:
_compat_int_types = (int, long)
except NameError:
pass
try:
_compat_str = unicode
except NameError:
_compat_str = str
assert bytes != str
if b'\0'[0] == 0: # Python 3 semantics
def _compat_bytes_to_byte_vals(byt):
return byt
else:
def _compat_bytes_to_byte_vals(byt):
return [struct.unpack(b'!B', b)[0] for b in byt]
try:
_compat_int_from_byte_vals = int.from_bytes
except AttributeError:
def _compat_int_from_byte_vals(bytvals, endianess):
assert endianess == 'big'
res = 0
for bv in bytvals:
assert isinstance(bv, _compat_int_types)
res = (res << 8) + bv
return res
def _compat_to_bytes(intval, length, endianess):
assert isinstance(intval, _compat_int_types)
assert endianess == 'big'
if length == 4:
if intval < 0 or intval >= 2 ** 32:
raise struct.error("integer out of range for 'I' format code")
return struct.pack(b'!I', intval)
elif length == 16:
if intval < 0 or intval >= 2 ** 128:
raise struct.error("integer out of range for 'QQ' format code")
return struct.pack(b'!QQ', intval >> 64, intval & 0xffffffffffffffff)
else:
raise NotImplementedError()
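# A minimal doctest-style sketch of the round trip (big-endian only, as
# asserted above); 192.0.2.1 corresponds to the integer 3221225985:
#
# >>> _compat_to_bytes(3221225985, 4, 'big')
# b'\xc0\x00\x02\x01'
# >>> _compat_int_from_byte_vals(_compat_bytes_to_byte_vals(b'\xc0\x00\x02\x01'), 'big')
# 3221225985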
if hasattr(int, 'bit_length'):
    # Not int.bit_length, since that won't work in 2.7 where long exists
def _compat_bit_length(i):
return i.bit_length()
else:
def _compat_bit_length(i):
for res in itertools.count():
if i >> res == 0:
return res
def _compat_range(start, end, step=1):
assert step > 0
i = start
while i < end:
yield i
i += step
class _TotalOrderingMixin(object):
__slots__ = ()
# Helper that derives the other comparison operations from
# __lt__ and __eq__
# We avoid functools.total_ordering because it doesn't handle
# NotImplemented correctly yet (http://bugs.python.org/issue10042)
def __eq__(self, other):
raise NotImplementedError
def __ne__(self, other):
equal = self.__eq__(other)
if equal is NotImplemented:
return NotImplemented
return not equal
def __lt__(self, other):
raise NotImplementedError
def __le__(self, other):
less = self.__lt__(other)
if less is NotImplemented or not less:
return self.__eq__(other)
return less
def __gt__(self, other):
less = self.__lt__(other)
if less is NotImplemented:
return NotImplemented
equal = self.__eq__(other)
if equal is NotImplemented:
return NotImplemented
return not (less or equal)
def __ge__(self, other):
less = self.__lt__(other)
if less is NotImplemented:
return NotImplemented
return not less
IPV4LENGTH = 32
IPV6LENGTH = 128
class AddressValueError(ValueError):
"""A Value Error related to the address."""
class NetmaskValueError(ValueError):
"""A Value Error related to the netmask."""
def ip_address(address):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Address or IPv6Address object.
Raises:
ValueError: if the *address* passed isn't either a v4 or a v6
address
"""
try:
return IPv4Address(address)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Address(address)
except (AddressValueError, NetmaskValueError):
pass
if isinstance(address, bytes):
raise AddressValueError(
'%r does not appear to be an IPv4 or IPv6 address. '
'Did you pass in a bytes (str in Python 2) instead of'
' a unicode object?' % address)
raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
address)
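# A minimal doctest-style sketch of the dispatch above:
#
# >>> ip_address(u'192.0.2.1') == IPv4Address(u'192.0.2.1')
# True
# >>> ip_address(3221225985) == IPv4Address(u'192.0.2.1') # int < 2**32 -> IPv4
# True
# >>> ip_address(u'2001:db8::1').version
# 6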
def ip_network(address, strict=True):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP network. Either IPv4 or
IPv6 networks may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Network or IPv6Network object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address. Or if the network has host bits set.
"""
try:
return IPv4Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
if isinstance(address, bytes):
raise AddressValueError(
'%r does not appear to be an IPv4 or IPv6 network. '
'Did you pass in a bytes (str in Python 2) instead of'
' a unicode object?' % address)
raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %
address)
def ip_interface(address):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Interface or IPv6Interface object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address.
Notes:
The IPv?Interface classes describe an Address on a particular
Network, so they're basically a combination of both the Address
and Network classes.
"""
try:
return IPv4Interface(address)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Interface(address)
except (AddressValueError, NetmaskValueError):
pass
raise ValueError('%r does not appear to be an IPv4 or IPv6 interface' %
address)
def v4_int_to_packed(address):
"""Represent an address as 4 packed bytes in network (big-endian) order.
Args:
address: An integer representation of an IPv4 IP address.
Returns:
The integer address packed as 4 bytes in network (big-endian) order.
Raises:
ValueError: If the integer is negative or too large to be an
IPv4 IP address.
"""
try:
return _compat_to_bytes(address, 4, 'big')
except (struct.error, OverflowError):
raise ValueError("Address negative or too large for IPv4")
def v6_int_to_packed(address):
"""Represent an address as 16 packed bytes in network (big-endian) order.
Args:
address: An integer representation of an IPv6 IP address.
Returns:
The integer address packed as 16 bytes in network (big-endian) order.
"""
try:
return _compat_to_bytes(address, 16, 'big')
except (struct.error, OverflowError):
raise ValueError("Address negative or too large for IPv6")
def _split_optional_netmask(address):
"""Helper to split the netmask and raise AddressValueError if needed"""
addr = _compat_str(address).split('/')
if len(addr) > 2:
raise AddressValueError("Only one '/' permitted in %r" % address)
return addr
def _find_address_range(addresses):
"""Find a sequence of sorted deduplicated IPv#Address.
Args:
addresses: a list of IPv#Address objects.
Yields:
A tuple containing the first and last IP addresses in the sequence.
"""
it = iter(addresses)
first = last = next(it)
for ip in it:
if ip._ip != last._ip + 1:
yield first, last
first = ip
last = ip
yield first, last
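# A minimal sketch: for a sorted, deduplicated input, one (first, last) pair
# is yielded per consecutive run, e.g. [.1, .2, .5] gives (.1, .2), (.5, .5):
#
# >>> addrs = [IPv4Address(u'192.0.2.1'), IPv4Address(u'192.0.2.2'),
# ...          IPv4Address(u'192.0.2.5')]
# >>> [(str(a), str(b)) for a, b in _find_address_range(addrs)]
# [('192.0.2.1', '192.0.2.2'), ('192.0.2.5', '192.0.2.5')]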
def _count_righthand_zero_bits(number, bits):
"""Count the number of zero bits on the right hand side.
Args:
number: an integer.
bits: maximum number of bits to count.
Returns:
The number of zero bits on the right hand side of the number.
"""
if number == 0:
return bits
return min(bits, _compat_bit_length(~number & (number - 1)))
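# A minimal doctest-style sketch: 40 is 0b101000, so it has three trailing
# zero bits; zero is all-zeroes, so the full bit budget is returned:
#
# >>> _count_righthand_zero_bits(40, 32)
# 3
# >>> _count_righthand_zero_bits(0, 32)
# 32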
def summarize_address_range(first, last):
"""Summarize a network range given the first and last IP addresses.
Example:
>>> list(summarize_address_range(IPv4Address('192.0.2.0'),
... IPv4Address('192.0.2.130')))
... #doctest: +NORMALIZE_WHITESPACE
[IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'),
IPv4Network('192.0.2.130/32')]
Args:
first: the first IPv4Address or IPv6Address in the range.
last: the last IPv4Address or IPv6Address in the range.
Returns:
An iterator of the summarized IPv(4|6) network objects.
Raise:
TypeError:
If the first and last objects are not IP addresses.
If the first and last objects are not the same version.
ValueError:
If the last object is not greater than the first.
If the version of the first address is not 4 or 6.
"""
if (not (isinstance(first, _BaseAddress) and
isinstance(last, _BaseAddress))):
raise TypeError('first and last must be IP addresses, not networks')
if first.version != last.version:
raise TypeError("%s and %s are not of the same version" % (
first, last))
if first > last:
raise ValueError('last IP address must be greater than first')
if first.version == 4:
ip = IPv4Network
elif first.version == 6:
ip = IPv6Network
else:
raise ValueError('unknown IP version')
ip_bits = first._max_prefixlen
first_int = first._ip
last_int = last._ip
while first_int <= last_int:
nbits = min(_count_righthand_zero_bits(first_int, ip_bits),
_compat_bit_length(last_int - first_int + 1) - 1)
net = ip((first_int, ip_bits - nbits))
yield net
first_int += 1 << nbits
if first_int - 1 == ip._ALL_ONES:
break
def _collapse_addresses_internal(addresses):
"""Loops through the addresses, collapsing concurrent netblocks.
Example:
ip1 = IPv4Network('192.0.2.0/26')
ip2 = IPv4Network('192.0.2.64/26')
ip3 = IPv4Network('192.0.2.128/26')
ip4 = IPv4Network('192.0.2.192/26')
_collapse_addresses_internal([ip1, ip2, ip3, ip4]) ->
[IPv4Network('192.0.2.0/24')]
This shouldn't be called directly; it is called via
collapse_addresses([]).
Args:
addresses: A list of IPv4Network's or IPv6Network's
Returns:
A list of IPv4Network's or IPv6Network's depending on what we were
passed.
"""
# First merge
to_merge = list(addresses)
subnets = {}
while to_merge:
net = to_merge.pop()
supernet = net.supernet()
existing = subnets.get(supernet)
if existing is None:
subnets[supernet] = net
elif existing != net:
# Merge consecutive subnets
del subnets[supernet]
to_merge.append(supernet)
# Then iterate over resulting networks, skipping subsumed subnets
last = None
for net in sorted(subnets.values()):
if last is not None:
# Since they are sorted,
# last.network_address <= net.network_address is a given.
if last.broadcast_address >= net.broadcast_address:
continue
yield net
last = net
def collapse_addresses(addresses):
"""Collapse a list of IP objects.
Example:
collapse_addresses([IPv4Network('192.0.2.0/25'),
IPv4Network('192.0.2.128/25')]) ->
[IPv4Network('192.0.2.0/24')]
Args:
addresses: An iterator of IPv4Network or IPv6Network objects.
Returns:
An iterator of the collapsed IPv(4|6)Network objects.
Raises:
TypeError: If passed a list of mixed version objects.
"""
addrs = []
ips = []
nets = []
# split IP addresses and networks
for ip in addresses:
if isinstance(ip, _BaseAddress):
if ips and ips[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, ips[-1]))
ips.append(ip)
elif ip._prefixlen == ip._max_prefixlen:
if ips and ips[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, ips[-1]))
try:
ips.append(ip.ip)
except AttributeError:
ips.append(ip.network_address)
else:
if nets and nets[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, nets[-1]))
nets.append(ip)
# sort and dedup
ips = sorted(set(ips))
# find consecutive address ranges in the sorted sequence and summarize them
if ips:
for first, last in _find_address_range(ips):
addrs.extend(summarize_address_range(first, last))
return _collapse_addresses_internal(addrs + nets)
def get_mixed_type_key(obj):
"""Return a key suitable for sorting between networks and addresses.
Address and Network objects are not sortable by default; they're
fundamentally different so the expression
IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24')
doesn't make any sense. There are some times however, where you may wish
to have ipaddress sort these for you anyway. If you need to do this, you
can use this function as the key= argument to sorted().
Args:
obj: either a Network or Address object.
Returns:
appropriate key.
"""
if isinstance(obj, _BaseNetwork):
return obj._get_networks_key()
elif isinstance(obj, _BaseAddress):
return obj._get_address_key()
return NotImplemented
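# A minimal doctest-style sketch of mixed sorting with this key:
#
# >>> mixed = [IPv4Address(u'192.0.2.1'), IPv4Network(u'192.0.2.0/24')]
# >>> [str(x) for x in sorted(mixed, key=get_mixed_type_key)]
# ['192.0.2.0/24', '192.0.2.1']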
class _IPAddressBase(_TotalOrderingMixin):
"""The mother class."""
__slots__ = ()
@property
def exploded(self):
"""Return the longhand version of the IP address as a string."""
return self._explode_shorthand_ip_string()
@property
def compressed(self):
"""Return the shorthand version of the IP address as a string."""
return _compat_str(self)
@property
def reverse_pointer(self):
"""The name of the reverse DNS pointer for the IP address, e.g.:
>>> ipaddress.ip_address("127.0.0.1").reverse_pointer
'1.0.0.127.in-addr.arpa'
>>> ipaddress.ip_address("2001:db8::1").reverse_pointer
'1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa'
"""
return self._reverse_pointer()
@property
def version(self):
msg = '%200s has no version specified' % (type(self),)
raise NotImplementedError(msg)
def _check_int_address(self, address):
if address < 0:
msg = "%d (< 0) is not permitted as an IPv%d address"
raise AddressValueError(msg % (address, self._version))
if address > self._ALL_ONES:
msg = "%d (>= 2**%d) is not permitted as an IPv%d address"
raise AddressValueError(msg % (address, self._max_prefixlen,
self._version))
def _check_packed_address(self, address, expected_len):
address_len = len(address)
if address_len != expected_len:
msg = (
'%r (len %d != %d) is not permitted as an IPv%d address. '
'Did you pass in a bytes (str in Python 2) instead of'
' a unicode object?'
)
raise AddressValueError(msg % (address, address_len,
expected_len, self._version))
@classmethod
def _ip_int_from_prefix(cls, prefixlen):
"""Turn the prefix length into a bitwise netmask
Args:
prefixlen: An integer, the prefix length.
Returns:
An integer.
"""
return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen)
@classmethod
def _prefix_from_ip_int(cls, ip_int):
"""Return prefix length from the bitwise netmask.
Args:
ip_int: An integer, the netmask in expanded bitwise format
Returns:
An integer, the prefix length.
Raises:
ValueError: If the input intermingles zeroes & ones
"""
trailing_zeroes = _count_righthand_zero_bits(ip_int,
cls._max_prefixlen)
prefixlen = cls._max_prefixlen - trailing_zeroes
leading_ones = ip_int >> trailing_zeroes
all_ones = (1 << prefixlen) - 1
if leading_ones != all_ones:
byteslen = cls._max_prefixlen // 8
details = _compat_to_bytes(ip_int, byteslen, 'big')
msg = 'Netmask pattern %r mixes zeroes & ones'
raise ValueError(msg % details)
return prefixlen
@classmethod
def _report_invalid_netmask(cls, netmask_str):
msg = '%r is not a valid netmask' % netmask_str
raise NetmaskValueError(msg)
@classmethod
def _prefix_from_prefix_string(cls, prefixlen_str):
"""Return prefix length from a numeric string
Args:
prefixlen_str: The string to be converted
Returns:
An integer, the prefix length.
Raises:
NetmaskValueError: If the input is not a valid netmask
"""
# int allows a leading +/- as well as surrounding whitespace,
# so we ensure that isn't the case
if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):
cls._report_invalid_netmask(prefixlen_str)
try:
prefixlen = int(prefixlen_str)
except ValueError:
cls._report_invalid_netmask(prefixlen_str)
if not (0 <= prefixlen <= cls._max_prefixlen):
cls._report_invalid_netmask(prefixlen_str)
return prefixlen
@classmethod
def _prefix_from_ip_string(cls, ip_str):
"""Turn a netmask/hostmask string into a prefix length
Args:
ip_str: The netmask/hostmask to be converted
Returns:
An integer, the prefix length.
Raises:
NetmaskValueError: If the input is not a valid netmask/hostmask
"""
# Parse the netmask/hostmask like an IP address.
try:
ip_int = cls._ip_int_from_string(ip_str)
except AddressValueError:
cls._report_invalid_netmask(ip_str)
# Try matching a netmask (this would be /1*0*/ as a bitwise regexp).
# Note that the two ambiguous cases (all-ones and all-zeroes) are
# treated as netmasks.
try:
return cls._prefix_from_ip_int(ip_int)
except ValueError:
pass
# Invert the bits, and try matching a /0+1+/ hostmask instead.
ip_int ^= cls._ALL_ONES
try:
return cls._prefix_from_ip_int(ip_int)
except ValueError:
cls._report_invalid_netmask(ip_str)
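    # A minimal doctest-style sketch of the two accepted dotted-quad forms
    # (IPv4 shown; both spellings resolve to the same prefix length):
    #
    # >>> IPv4Network(u'0.0.0.0/255.255.255.0').prefixlen # netmask form
    # 24
    # >>> IPv4Network(u'0.0.0.0/0.0.0.255').prefixlen # hostmask form
    # 24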
def __reduce__(self):
return self.__class__, (_compat_str(self),)
class _BaseAddress(_IPAddressBase):
"""A generic IP object.
This IP class contains the version independent methods which are
used by single IP addresses.
"""
__slots__ = ()
def __int__(self):
return self._ip
def __eq__(self, other):
try:
return (self._ip == other._ip and
self._version == other._version)
except AttributeError:
return NotImplemented
def __lt__(self, other):
if not isinstance(other, _IPAddressBase):
return NotImplemented
if not isinstance(other, _BaseAddress):
raise TypeError('%s and %s are not of the same type' % (
self, other))
if self._version != other._version:
raise TypeError('%s and %s are not of the same version' % (
self, other))
if self._ip != other._ip:
return self._ip < other._ip
return False
# Shorthand for Integer addition and subtraction. This is not
# meant to ever support addition/subtraction of addresses.
def __add__(self, other):
if not isinstance(other, _compat_int_types):
return NotImplemented
return self.__class__(int(self) + other)
def __sub__(self, other):
if not isinstance(other, _compat_int_types):
return NotImplemented
return self.__class__(int(self) - other)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, _compat_str(self))
def __str__(self):
return _compat_str(self._string_from_ip_int(self._ip))
def __hash__(self):
return hash(hex(int(self._ip)))
def _get_address_key(self):
return (self._version, self)
def __reduce__(self):
return self.__class__, (self._ip,)
class _BaseNetwork(_IPAddressBase):
"""A generic IP network object.
This IP class contains the version independent methods which are
used by networks.
"""
def __init__(self, address):
self._cache = {}
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, _compat_str(self))
def __str__(self):
return '%s/%d' % (self.network_address, self.prefixlen)
def hosts(self):
"""Generate Iterator over usable hosts in a network.
This is like __iter__ except it doesn't return the network
or broadcast addresses.
"""
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in _compat_range(network + 1, broadcast):
yield self._address_class(x)
def __iter__(self):
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in _compat_range(network, broadcast + 1):
yield self._address_class(x)
def __getitem__(self, n):
network = int(self.network_address)
broadcast = int(self.broadcast_address)
if n >= 0:
if network + n > broadcast:
raise IndexError('address out of range')
return self._address_class(network + n)
else:
n += 1
if broadcast + n < network:
raise IndexError('address out of range')
return self._address_class(broadcast + n)
def __lt__(self, other):
if not isinstance(other, _IPAddressBase):
return NotImplemented
if not isinstance(other, _BaseNetwork):
raise TypeError('%s and %s are not of the same type' % (
self, other))
if self._version != other._version:
raise TypeError('%s and %s are not of the same version' % (
self, other))
if self.network_address != other.network_address:
return self.network_address < other.network_address
if self.netmask != other.netmask:
return self.netmask < other.netmask
return False
def __eq__(self, other):
try:
return (self._version == other._version and
self.network_address == other.network_address and
int(self.netmask) == int(other.netmask))
except AttributeError:
return NotImplemented
def __hash__(self):
return hash(int(self.network_address) ^ int(self.netmask))
def __contains__(self, other):
# always false if one is v4 and the other is v6.
if self._version != other._version:
return False
# dealing with another network.
if isinstance(other, _BaseNetwork):
return False
# dealing with another address
else:
# address
return (int(self.network_address) <= int(other._ip) <=
int(self.broadcast_address))
def overlaps(self, other):
"""Tell if self is partly contained in other."""
return self.network_address in other or (
self.broadcast_address in other or (
other.network_address in self or (
other.broadcast_address in self)))
@property
def broadcast_address(self):
x = self._cache.get('broadcast_address')
if x is None:
x = self._address_class(int(self.network_address) |
int(self.hostmask))
self._cache['broadcast_address'] = x
return x
@property
def hostmask(self):
x = self._cache.get('hostmask')
if x is None:
x = self._address_class(int(self.netmask) ^ self._ALL_ONES)
self._cache['hostmask'] = x
return x
@property
def with_prefixlen(self):
return '%s/%d' % (self.network_address, self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (self.network_address, self.netmask)
@property
def with_hostmask(self):
return '%s/%s' % (self.network_address, self.hostmask)
@property
def num_addresses(self):
"""Number of hosts in the current subnet."""
return int(self.broadcast_address) - int(self.network_address) + 1
@property
def _address_class(self):
# Returning bare address objects (rather than interfaces) allows for
# more consistent behaviour across the network address, broadcast
# address and individual host addresses.
msg = '%200s has no associated address class' % (type(self),)
raise NotImplementedError(msg)
@property
def prefixlen(self):
return self._prefixlen
def address_exclude(self, other):
"""Remove an address from a larger block.
For example:
addr1 = ip_network('192.0.2.0/28')
addr2 = ip_network('192.0.2.1/32')
list(addr1.address_exclude(addr2)) =
[IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'),
IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')]
or IPv6:
addr1 = ip_network('2001:db8::1/32')
addr2 = ip_network('2001:db8::1/128')
list(addr1.address_exclude(addr2)) =
[ip_network('2001:db8::1/128'),
ip_network('2001:db8::2/127'),
ip_network('2001:db8::4/126'),
ip_network('2001:db8::8/125'),
...
ip_network('2001:db8:8000::/33')]
Args:
other: An IPv4Network or IPv6Network object of the same type.
Returns:
An iterator of the IPv(4|6)Network objects which is self
minus other.
Raises:
TypeError: If self and other are of differing address
versions, or if other is not a network object.
ValueError: If other is not completely contained by self.
"""
if not self._version == other._version:
raise TypeError("%s and %s are not of the same version" % (
self, other))
if not isinstance(other, _BaseNetwork):
raise TypeError("%s is not a network object" % other)
if not other.subnet_of(self):
raise ValueError('%s not contained in %s' % (other, self))
if other == self:
return
# Make sure we're comparing the network of other.
other = other.__class__('%s/%s' % (other.network_address,
other.prefixlen))
s1, s2 = self.subnets()
while s1 != other and s2 != other:
if other.subnet_of(s1):
yield s2
s1, s2 = s1.subnets()
elif other.subnet_of(s2):
yield s1
s1, s2 = s2.subnets()
else:
# If we got here, there's a bug somewhere.
raise AssertionError('Error performing exclusion: '
's1: %s s2: %s other: %s' %
(s1, s2, other))
if s1 == other:
yield s2
elif s2 == other:
yield s1
else:
# If we got here, there's a bug somewhere.
raise AssertionError('Error performing exclusion: '
's1: %s s2: %s other: %s' %
(s1, s2, other))
def compare_networks(self, other):
"""Compare two IP objects.
This is only concerned about the comparison of the integer
representation of the network addresses. This means that the
host bits aren't considered at all in this method. If you want
to compare host bits, you can easily enough do a
'HostA._ip < HostB._ip'
Args:
other: An IP object.
Returns:
If the IP versions of self and other are the same, returns:
-1 if self < other:
eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25')
IPv6Network('2001:db8::1000/124') <
IPv6Network('2001:db8::2000/124')
0 if self == other
eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24')
IPv6Network('2001:db8::1000/124') ==
IPv6Network('2001:db8::1000/124')
1 if self > other
eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25')
IPv6Network('2001:db8::2000/124') >
IPv6Network('2001:db8::1000/124')
Raises:
TypeError if the IP versions are different.
"""
# does this need to raise a ValueError?
if self._version != other._version:
raise TypeError('%s and %s are not of the same type' % (
self, other))
# self._version == other._version below here:
if self.network_address < other.network_address:
return -1
if self.network_address > other.network_address:
return 1
# self.network_address == other.network_address below here:
if self.netmask < other.netmask:
return -1
if self.netmask > other.netmask:
return 1
return 0
def _get_networks_key(self):
"""Network-only key function.
Returns an object that identifies this address' network and
netmask. This function is a suitable "key" argument for sorted()
and list.sort().
"""
return (self._version, self.network_address, self.netmask)
def subnets(self, prefixlen_diff=1, new_prefix=None):
"""The subnets which join to make the current subnet.
In the case that self contains only one IP
(self._prefixlen == 32 for IPv4 or self._prefixlen == 128
for IPv6), yield an iterator with just ourself.
Args:
prefixlen_diff: An integer, the amount the prefix length
should be increased by. This should not be set if
new_prefix is also set.
new_prefix: The desired new prefix length. This must be a
larger number (smaller prefix) than the existing prefix.
This should not be set if prefixlen_diff is also set.
Returns:
An iterator of IPv(4|6) objects.
Raises:
ValueError: The prefixlen_diff is too small or too large.
OR
prefixlen_diff and new_prefix are both set or new_prefix
is a smaller number than the current prefix (smaller
number means a larger network)
"""
if self._prefixlen == self._max_prefixlen:
yield self
return
if new_prefix is not None:
if new_prefix < self._prefixlen:
raise ValueError('new prefix must be longer')
if prefixlen_diff != 1:
raise ValueError('cannot set prefixlen_diff and new_prefix')
prefixlen_diff = new_prefix - self._prefixlen
if prefixlen_diff < 0:
raise ValueError('prefix length diff must be > 0')
new_prefixlen = self._prefixlen + prefixlen_diff
if new_prefixlen > self._max_prefixlen:
raise ValueError(
'prefix length diff %d is invalid for netblock %s' % (
new_prefixlen, self))
start = int(self.network_address)
end = int(self.broadcast_address) + 1
step = (int(self.hostmask) + 1) >> prefixlen_diff
for new_addr in _compat_range(start, end, step):
current = self.__class__((new_addr, new_prefixlen))
yield current
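    # A minimal doctest-style sketch: splitting a /24 into four /26 blocks:
    #
    # >>> [str(s) for s in IPv4Network(u'192.0.2.0/24').subnets(new_prefix=26)]
    # ['192.0.2.0/26', '192.0.2.64/26', '192.0.2.128/26', '192.0.2.192/26']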
def supernet(self, prefixlen_diff=1, new_prefix=None):
"""The supernet containing the current network.
Args:
prefixlen_diff: An integer, the amount the prefix length of
the network should be decreased by. For example, given a
/24 network and a prefixlen_diff of 3, a supernet with a
/21 netmask is returned.
Returns:
An IPv4 network object.
Raises:
ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have
a negative prefix length.
OR
If prefixlen_diff and new_prefix are both set or new_prefix is a
larger number than the current prefix (larger number means a
smaller network)
"""
if self._prefixlen == 0:
return self
if new_prefix is not None:
if new_prefix > self._prefixlen:
raise ValueError('new prefix must be shorter')
if prefixlen_diff != 1:
raise ValueError('cannot set prefixlen_diff and new_prefix')
prefixlen_diff = self._prefixlen - new_prefix
new_prefixlen = self.prefixlen - prefixlen_diff
if new_prefixlen < 0:
raise ValueError(
'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
(self.prefixlen, prefixlen_diff))
return self.__class__((
int(self.network_address) & (int(self.netmask) << prefixlen_diff),
new_prefixlen
))
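    # A minimal doctest-style sketch: widening a /24 by three bits gives the
    # enclosing /21, mirroring the docstring above:
    #
    # >>> str(IPv4Network(u'192.0.2.0/24').supernet(prefixlen_diff=3))
    # '192.0.0.0/21'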
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is a multicast address.
See RFC 2373 2.7 for details.
"""
return (self.network_address.is_multicast and
self.broadcast_address.is_multicast)
def subnet_of(self, other):
# always false if one is v4 and the other is v6.
if self._version != other._version:
return False
# dealing with another network.
if (hasattr(other, 'network_address') and
hasattr(other, 'broadcast_address')):
return (other.network_address <= self.network_address and
other.broadcast_address >= self.broadcast_address)
# dealing with another address
else:
raise TypeError('Unable to test subnet containment with element '
'of type %s' % type(other))
def supernet_of(self, other):
# always false if one is v4 and the other is v6.
if self._version != other._version:
return False
# dealing with another network.
if (hasattr(other, 'network_address') and
hasattr(other, 'broadcast_address')):
return (other.network_address >= self.network_address and
other.broadcast_address <= self.broadcast_address)
# dealing with another address
else:
raise TypeError('Unable to test subnet containment with element '
'of type %s' % type(other))
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within one of the
reserved IPv6 Network ranges.
"""
return (self.network_address.is_reserved and
self.broadcast_address.is_reserved)
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is reserved per RFC 4291.
"""
return (self.network_address.is_link_local and
self.broadcast_address.is_link_local)
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv4-special-registry or iana-ipv6-special-registry.
"""
return (self.network_address.is_private and
self.broadcast_address.is_private)
@property
def is_global(self):
"""Test if this address is allocated for public networks.
Returns:
A boolean, True if the address is not reserved per
iana-ipv4-special-registry or iana-ipv6-special-registry.
"""
return not self.is_private
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 2373 2.5.2.
"""
return (self.network_address.is_unspecified and
self.broadcast_address.is_unspecified)
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback address as defined in
RFC 2373 2.5.3.
"""
return (self.network_address.is_loopback and
self.broadcast_address.is_loopback)
class _BaseV4(object):
"""Base IPv4 object.
The following methods are used by IPv4 objects in both single IP
addresses and networks.
"""
__slots__ = ()
_version = 4
# Equivalent to 255.255.255.255 or 32 bits of 1's.
_ALL_ONES = (2 ** IPV4LENGTH) - 1
_DECIMAL_DIGITS = frozenset('0123456789')
# the valid octets for host and netmasks. only useful for IPv4.
_valid_mask_octets = frozenset([255, 254, 252, 248, 240, 224, 192, 128, 0])
_max_prefixlen = IPV4LENGTH
# There are only a handful of valid v4 netmasks, so we cache them all
# when constructed (see _make_netmask()).
_netmask_cache = {}
def _explode_shorthand_ip_string(self):
return _compat_str(self)
@classmethod
def _make_netmask(cls, arg):
"""Make a (netmask, prefix_len) tuple from the given argument.
Argument can be:
- an integer (the prefix length)
- a string representing the prefix length (e.g. "24")
        - a string representing the netmask or hostmask in dotted-quad
          form (e.g. "255.255.255.0" or "0.0.0.255")
"""
if arg not in cls._netmask_cache:
if isinstance(arg, _compat_int_types):
prefixlen = arg
else:
try:
# Check for a netmask in prefix length form
prefixlen = cls._prefix_from_prefix_string(arg)
except NetmaskValueError:
# Check for a netmask or hostmask in dotted-quad form.
# This may raise NetmaskValueError.
prefixlen = cls._prefix_from_ip_string(arg)
netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen))
cls._netmask_cache[arg] = netmask, prefixlen
return cls._netmask_cache[arg]
@classmethod
def _ip_int_from_string(cls, ip_str):
"""Turn the given IP string into an integer for comparison.
Args:
ip_str: A string, the IP ip_str.
Returns:
The IP ip_str as an integer.
Raises:
AddressValueError: if ip_str isn't a valid IPv4 Address.
"""
if not ip_str:
raise AddressValueError('Address cannot be empty')
octets = ip_str.split('.')
if len(octets) != 4:
raise AddressValueError("Expected 4 octets in %r" % ip_str)
try:
return _compat_int_from_byte_vals(
map(cls._parse_octet, octets), 'big')
except ValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
@classmethod
def _parse_octet(cls, octet_str):
"""Convert a decimal octet into an integer.
Args:
octet_str: A string, the number to parse.
Returns:
The octet as an integer.
Raises:
ValueError: if the octet isn't strictly a decimal from [0..255].
"""
if not octet_str:
raise ValueError("Empty octet not permitted")
# Whitelist the characters, since int() allows a lot of bizarre stuff.
if not cls._DECIMAL_DIGITS.issuperset(octet_str):
msg = "Only decimal digits permitted in %r"
raise ValueError(msg % octet_str)
# We do the length check second, since the invalid character error
# is likely to be more informative for the user
if len(octet_str) > 3:
msg = "At most 3 characters permitted in %r"
raise ValueError(msg % octet_str)
# Convert to integer (we know digits are legal)
octet_int = int(octet_str, 10)
# Any octets that look like they *might* be written in octal,
# and which don't look exactly the same in both octal and
# decimal are rejected as ambiguous
if octet_int > 7 and octet_str[0] == '0':
msg = "Ambiguous (octal/decimal) value in %r not permitted"
raise ValueError(msg % octet_str)
if octet_int > 255:
raise ValueError("Octet %d (> 255) not permitted" % octet_int)
return octet_int
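    # A minimal doctest-style sketch of the octal-ambiguity rule: '0' is
    # fine, but '08' reads differently in octal and decimal, so it is
    # rejected:
    #
    # >>> _BaseV4._parse_octet('255')
    # 255
    # >>> _BaseV4._parse_octet('08') # raises ValueError (ambiguous)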
@classmethod
def _string_from_ip_int(cls, ip_int):
"""Turns a 32-bit integer into dotted decimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
The IP address as a string in dotted decimal notation.
"""
return '.'.join(_compat_str(struct.unpack(b'!B', b)[0]
if isinstance(b, bytes)
else b)
for b in _compat_to_bytes(ip_int, 4, 'big'))
def _is_hostmask(self, ip_str):
"""Test if the IP string is a hostmask (rather than a netmask).
Args:
ip_str: A string, the potential hostmask.
Returns:
A boolean, True if the IP string is a hostmask.
"""
bits = ip_str.split('.')
try:
parts = [x for x in map(int, bits) if x in self._valid_mask_octets]
except ValueError:
return False
if len(parts) != len(bits):
return False
if parts[0] < parts[-1]:
return True
return False
def _reverse_pointer(self):
"""Return the reverse DNS pointer name for the IPv4 address.
This implements the method described in RFC1035 3.5.
"""
reverse_octets = _compat_str(self).split('.')[::-1]
return '.'.join(reverse_octets) + '.in-addr.arpa'
@property
def max_prefixlen(self):
return self._max_prefixlen
@property
def version(self):
return self._version
class IPv4Address(_BaseV4, _BaseAddress):
"""Represent and manipulate single IPv4 Addresses."""
__slots__ = ('_ip', '__weakref__')
def __init__(self, address):
"""
Args:
address: A string or integer representing the IP
Additionally, an integer can be passed, so
IPv4Address('192.0.2.1') == IPv4Address(3221225985).
or, more generally
IPv4Address(int(IPv4Address('192.0.2.1'))) ==
IPv4Address('192.0.2.1')
Raises:
AddressValueError: If ipaddress isn't a valid IPv4 address.
"""
# Efficient constructor from integer.
if isinstance(address, _compat_int_types):
self._check_int_address(address)
self._ip = address
return
# Constructing from a packed address
if isinstance(address, bytes):
self._check_packed_address(address, 4)
bvs = _compat_bytes_to_byte_vals(address)
self._ip = _compat_int_from_byte_vals(bvs, 'big')
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP string.
addr_str = _compat_str(address)
if '/' in addr_str:
raise AddressValueError("Unexpected '/' in %r" % address)
self._ip = self._ip_int_from_string(addr_str)
@property
def packed(self):
"""The binary representation of this address."""
return v4_int_to_packed(self._ip)
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within the
reserved IPv4 Network range.
"""
return self in self._constants._reserved_network
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv4-special-registry.
"""
return any(self in net for net in self._constants._private_networks)
@property
def is_global(self):
return (
self not in self._constants._public_network and
not self.is_private)
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is multicast.
See RFC 3171 for details.
"""
return self in self._constants._multicast_network
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 5735 3.
"""
return self == self._constants._unspecified_address
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback per RFC 3330.
"""
return self in self._constants._loopback_network
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is link-local per RFC 3927.
"""
return self in self._constants._linklocal_network
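# A minimal doctest-style sketch of the classification properties above
# (usable once _constants is attached further down in this module):
#
# >>> IPv4Address(u'127.0.0.1').is_loopback
# True
# >>> IPv4Address(u'10.1.2.3').is_private
# True
# >>> IPv4Address(u'224.0.0.1').is_multicast
# True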
class IPv4Interface(IPv4Address):
def __init__(self, address):
if isinstance(address, (bytes, _compat_int_types)):
IPv4Address.__init__(self, address)
self.network = IPv4Network(self._ip)
self._prefixlen = self._max_prefixlen
return
if isinstance(address, tuple):
IPv4Address.__init__(self, address[0])
if len(address) > 1:
self._prefixlen = int(address[1])
else:
self._prefixlen = self._max_prefixlen
self.network = IPv4Network(address, strict=False)
self.netmask = self.network.netmask
self.hostmask = self.network.hostmask
return
addr = _split_optional_netmask(address)
IPv4Address.__init__(self, addr[0])
self.network = IPv4Network(address, strict=False)
self._prefixlen = self.network._prefixlen
self.netmask = self.network.netmask
self.hostmask = self.network.hostmask
def __str__(self):
return '%s/%d' % (self._string_from_ip_int(self._ip),
self.network.prefixlen)
def __eq__(self, other):
address_equal = IPv4Address.__eq__(self, other)
if not address_equal or address_equal is NotImplemented:
return address_equal
try:
return self.network == other.network
except AttributeError:
# An interface with an associated network is NOT the
# same as an unassociated address. That's why the hash
# takes the extra info into account.
return False
def __lt__(self, other):
address_less = IPv4Address.__lt__(self, other)
if address_less is NotImplemented:
return NotImplemented
try:
return self.network < other.network
except AttributeError:
# We *do* allow addresses and interfaces to be sorted. The
# unassociated address is considered less than all interfaces.
return False
def __hash__(self):
return self._ip ^ self._prefixlen ^ int(self.network.network_address)
__reduce__ = _IPAddressBase.__reduce__
@property
def ip(self):
return IPv4Address(self._ip)
@property
def with_prefixlen(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.netmask)
@property
def with_hostmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.hostmask)
class IPv4Network(_BaseV4, _BaseNetwork):
"""This class represents and manipulates 32-bit IPv4 network + addresses..
Attributes: [examples for IPv4Network('192.0.2.0/27')]
.network_address: IPv4Address('192.0.2.0')
.hostmask: IPv4Address('0.0.0.31')
        .broadcast_address: IPv4Address('192.0.2.31')
.netmask: IPv4Address('255.255.255.224')
.prefixlen: 27
"""
# Class to use when creating address objects
_address_class = IPv4Address
def __init__(self, address, strict=True):
"""Instantiate a new IPv4 network object.
Args:
address: A string or integer representing the IP [& network].
'192.0.2.0/24'
'192.0.2.0/255.255.255.0'
'192.0.0.2/0.0.0.255'
are all functionally the same in IPv4. Similarly,
'192.0.2.1'
'192.0.2.1/255.255.255.255'
'192.0.2.1/32'
are also functionally equivalent. That is to say, failing to
provide a subnetmask will create an object with a mask of /32.
If the mask (portion after the / in the argument) is given in
dotted quad form, it is treated as a netmask if it starts with a
non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
starts with a zero field (e.g. 0.255.255.255 == /8), with the
single exception of an all-zero mask which is treated as a
netmask == /0. If no mask is given, a default of /32 is used.
Additionally, an integer can be passed, so
IPv4Network('192.0.2.1') == IPv4Network(3221225985)
or, more generally
IPv4Interface(int(IPv4Interface('192.0.2.1'))) ==
IPv4Interface('192.0.2.1')
Raises:
AddressValueError: If ipaddress isn't a valid IPv4 address.
NetmaskValueError: If the netmask isn't valid for
an IPv4 address.
ValueError: If strict is True and a network address is not
supplied.
"""
_BaseNetwork.__init__(self, address)
# Constructing from a packed address or integer
if isinstance(address, (_compat_int_types, bytes)):
self.network_address = IPv4Address(address)
self.netmask, self._prefixlen = self._make_netmask(
self._max_prefixlen)
# fixme: address/network test here.
return
if isinstance(address, tuple):
if len(address) > 1:
arg = address[1]
else:
# We weren't given an address[1]
arg = self._max_prefixlen
self.network_address = IPv4Address(address[0])
self.netmask, self._prefixlen = self._make_netmask(arg)
packed = int(self.network_address)
if packed & int(self.netmask) != packed:
if strict:
raise ValueError('%s has host bits set' % self)
else:
self.network_address = IPv4Address(packed &
int(self.netmask))
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
addr = _split_optional_netmask(address)
self.network_address = IPv4Address(self._ip_int_from_string(addr[0]))
if len(addr) == 2:
arg = addr[1]
else:
arg = self._max_prefixlen
self.netmask, self._prefixlen = self._make_netmask(arg)
if strict:
if (IPv4Address(int(self.network_address) & int(self.netmask)) !=
self.network_address):
raise ValueError('%s has host bits set' % self)
self.network_address = IPv4Address(int(self.network_address) &
int(self.netmask))
if self._prefixlen == (self._max_prefixlen - 1):
self.hosts = self.__iter__
@property
def is_global(self):
"""Test if this address is allocated for public networks.
Returns:
A boolean, True if the address is not reserved per
iana-ipv4-special-registry.
"""
return (not (self.network_address in IPv4Network('100.64.0.0/10') and
self.broadcast_address in IPv4Network('100.64.0.0/10')) and
not self.is_private)
class _IPv4Constants(object):
_linklocal_network = IPv4Network('169.254.0.0/16')
_loopback_network = IPv4Network('127.0.0.0/8')
_multicast_network = IPv4Network('224.0.0.0/4')
_public_network = IPv4Network('100.64.0.0/10')
_private_networks = [
IPv4Network('0.0.0.0/8'),
IPv4Network('10.0.0.0/8'),
IPv4Network('127.0.0.0/8'),
IPv4Network('169.254.0.0/16'),
IPv4Network('172.16.0.0/12'),
IPv4Network('192.0.0.0/29'),
IPv4Network('192.0.0.170/31'),
IPv4Network('192.0.2.0/24'),
IPv4Network('192.168.0.0/16'),
IPv4Network('198.18.0.0/15'),
IPv4Network('198.51.100.0/24'),
IPv4Network('203.0.113.0/24'),
IPv4Network('240.0.0.0/4'),
IPv4Network('255.255.255.255/32'),
]
_reserved_network = IPv4Network('240.0.0.0/4')
_unspecified_address = IPv4Address('0.0.0.0')
IPv4Address._constants = _IPv4Constants
class _BaseV6(object):
"""Base IPv6 object.
The following methods are used by IPv6 objects in both single IP
addresses and networks.
"""
__slots__ = ()
_version = 6
_ALL_ONES = (2 ** IPV6LENGTH) - 1
_HEXTET_COUNT = 8
_HEX_DIGITS = frozenset('0123456789ABCDEFabcdef')
_max_prefixlen = IPV6LENGTH
# There are only a bunch of valid v6 netmasks, so we cache them all
# when constructed (see _make_netmask()).
_netmask_cache = {}
@classmethod
def _make_netmask(cls, arg):
"""Make a (netmask, prefix_len) tuple from the given argument.
Argument can be:
- an integer (the prefix length)
        - a string representing the prefix length (e.g. "64")
"""
if arg not in cls._netmask_cache:
if isinstance(arg, _compat_int_types):
prefixlen = arg
else:
prefixlen = cls._prefix_from_prefix_string(arg)
netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen))
cls._netmask_cache[arg] = netmask, prefixlen
return cls._netmask_cache[arg]
@classmethod
def _ip_int_from_string(cls, ip_str):
"""Turn an IPv6 ip_str into an integer.
Args:
ip_str: A string, the IPv6 ip_str.
Returns:
An int, the IPv6 address
Raises:
AddressValueError: if ip_str isn't a valid IPv6 Address.
"""
if not ip_str:
raise AddressValueError('Address cannot be empty')
parts = ip_str.split(':')
# An IPv6 address needs at least 2 colons (3 parts).
_min_parts = 3
if len(parts) < _min_parts:
msg = "At least %d parts expected in %r" % (_min_parts, ip_str)
raise AddressValueError(msg)
# If the address has an IPv4-style suffix, convert it to hexadecimal.
if '.' in parts[-1]:
try:
ipv4_int = IPv4Address(parts.pop())._ip
except AddressValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF))
parts.append('%x' % (ipv4_int & 0xFFFF))
# An IPv6 address can't have more than 8 colons (9 parts).
# The extra colon comes from using the "::" notation for a single
# leading or trailing zero part.
_max_parts = cls._HEXTET_COUNT + 1
if len(parts) > _max_parts:
msg = "At most %d colons permitted in %r" % (
_max_parts - 1, ip_str)
raise AddressValueError(msg)
# Disregarding the endpoints, find '::' with nothing in between.
# This indicates that a run of zeroes has been skipped.
skip_index = None
for i in _compat_range(1, len(parts) - 1):
if not parts[i]:
if skip_index is not None:
# Can't have more than one '::'
msg = "At most one '::' permitted in %r" % ip_str
raise AddressValueError(msg)
skip_index = i
# parts_hi is the number of parts to copy from above/before the '::'
# parts_lo is the number of parts to copy from below/after the '::'
if skip_index is not None:
# If we found a '::', then check if it also covers the endpoints.
parts_hi = skip_index
parts_lo = len(parts) - skip_index - 1
if not parts[0]:
parts_hi -= 1
if parts_hi:
msg = "Leading ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # ^: requires ^::
if not parts[-1]:
parts_lo -= 1
if parts_lo:
msg = "Trailing ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # :$ requires ::$
parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo)
if parts_skipped < 1:
msg = "Expected at most %d other parts with '::' in %r"
raise AddressValueError(msg % (cls._HEXTET_COUNT - 1, ip_str))
else:
# Otherwise, allocate the entire address to parts_hi. The
# endpoints could still be empty, but _parse_hextet() will check
# for that.
if len(parts) != cls._HEXTET_COUNT:
msg = "Exactly %d parts expected without '::' in %r"
raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str))
if not parts[0]:
msg = "Leading ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # ^: requires ^::
if not parts[-1]:
msg = "Trailing ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # :$ requires ::$
parts_hi = len(parts)
parts_lo = 0
parts_skipped = 0
try:
# Now, parse the hextets into a 128-bit integer.
ip_int = 0
for i in range(parts_hi):
ip_int <<= 16
ip_int |= cls._parse_hextet(parts[i])
ip_int <<= 16 * parts_skipped
for i in range(-parts_lo, 0):
ip_int <<= 16
ip_int |= cls._parse_hextet(parts[i])
return ip_int
except ValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
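# Worked example (illustrative, not part of the original module):
# '2001:db8::1' splits into ['2001', 'db8', '', '1'], giving
# skip_index=2, parts_hi=2, parts_lo=1, parts_skipped=8-(2+1)=5;
# two high hextets are shifted in, then five zero hextets, then one
# low hextet, producing 0x20010db8000000000000000000000001.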
@classmethod
def _parse_hextet(cls, hextet_str):
"""Convert an IPv6 hextet string into an integer.
Args:
hextet_str: A string, the number to parse.
Returns:
The hextet as an integer.
Raises:
ValueError: if the input isn't strictly a hex number from
[0..FFFF].
"""
# Whitelist the characters, since int() allows a lot of bizarre stuff.
if not cls._HEX_DIGITS.issuperset(hextet_str):
raise ValueError("Only hex digits permitted in %r" % hextet_str)
# We do the length check second, since the invalid character error
# is likely to be more informative for the user
if len(hextet_str) > 4:
msg = "At most 4 characters permitted in %r"
raise ValueError(msg % hextet_str)
# Length check means we can skip checking the integer value
return int(hextet_str, 16)
@classmethod
def _compress_hextets(cls, hextets):
"""Compresses a list of hextets.
Compresses a list of strings, replacing the longest continuous
sequence of "0" in the list with "" and adding empty strings at
the beginning or at the end of the list such that subsequently
calling ":".join(hextets) will produce the compressed version of
the IPv6 address.
Args:
hextets: A list of strings, the hextets to compress.
Returns:
A list of strings.
"""
best_doublecolon_start = -1
best_doublecolon_len = 0
doublecolon_start = -1
doublecolon_len = 0
for index, hextet in enumerate(hextets):
if hextet == '0':
doublecolon_len += 1
if doublecolon_start == -1:
# Start of a sequence of zeros.
doublecolon_start = index
if doublecolon_len > best_doublecolon_len:
# This is the longest sequence of zeros so far.
best_doublecolon_len = doublecolon_len
best_doublecolon_start = doublecolon_start
else:
doublecolon_len = 0
doublecolon_start = -1
if best_doublecolon_len > 1:
best_doublecolon_end = (best_doublecolon_start +
best_doublecolon_len)
# For zeros at the end of the address.
if best_doublecolon_end == len(hextets):
hextets += ['']
hextets[best_doublecolon_start:best_doublecolon_end] = ['']
# For zeros at the beginning of the address.
if best_doublecolon_start == 0:
hextets = [''] + hextets
return hextets
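# Illustrative example (not part of the original module):
#   _compress_hextets(['2001', 'db8', '0', '0', '0', '0', '0', '1'])
#   # -> ['2001', 'db8', '', '1'], so ':'.join(...) == '2001:db8::1'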
@classmethod
def _string_from_ip_int(cls, ip_int=None):
"""Turns a 128-bit integer into hexadecimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
A string, the hexadecimal representation of the address.
Raises:
ValueError: The address is bigger than 128 bits of all ones.
"""
if ip_int is None:
ip_int = int(cls._ip)
if ip_int > cls._ALL_ONES:
raise ValueError('IPv6 address is too large')
hex_str = '%032x' % ip_int
hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)]
hextets = cls._compress_hextets(hextets)
return ':'.join(hextets)
def _explode_shorthand_ip_string(self):
"""Expand a shortened IPv6 address.
Returns:
A string, the expanded IPv6 address; networks and interfaces
include their prefix length.
"""
if isinstance(self, IPv6Network):
ip_str = _compat_str(self.network_address)
elif isinstance(self, IPv6Interface):
ip_str = _compat_str(self.ip)
else:
ip_str = _compat_str(self)
ip_int = self._ip_int_from_string(ip_str)
hex_str = '%032x' % ip_int
parts = [hex_str[x:x + 4] for x in range(0, 32, 4)]
if isinstance(self, (_BaseNetwork, IPv6Interface)):
return '%s/%d' % (':'.join(parts), self._prefixlen)
return ':'.join(parts)
def _reverse_pointer(self):
"""Return the reverse DNS pointer name for the IPv6 address.
This implements the method described in RFC3596 2.5.
"""
reverse_chars = self.exploded[::-1].replace(':', '')
return '.'.join(reverse_chars) + '.ip6.arpa'
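# Illustrative example (not part of the original module), assuming the
# public reverse_pointer property on the base class calls this method:
#   IPv6Address('2001:db8::1').reverse_pointer
#   # -> '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa'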
@property
def max_prefixlen(self):
return self._max_prefixlen
@property
def version(self):
return self._version
class IPv6Address(_BaseV6, _BaseAddress):
"""Represent and manipulate single IPv6 Addresses."""
__slots__ = ('_ip', '__weakref__')
def __init__(self, address):
"""Instantiate a new IPv6 address object.
Args:
address: A string or integer representing the IP.
Additionally, an integer can be passed, so
IPv6Address('2001:db8::') ==
IPv6Address(42540766411282592856903984951653826560)
or, more generally
IPv6Address(int(IPv6Address('2001:db8::'))) ==
IPv6Address('2001:db8::')
Raises:
AddressValueError: If address isn't a valid IPv6 address.
"""
# Efficient constructor from integer.
if isinstance(address, _compat_int_types):
self._check_int_address(address)
self._ip = address
return
# Constructing from a packed address
if isinstance(address, bytes):
self._check_packed_address(address, 16)
bvs = _compat_bytes_to_byte_vals(address)
self._ip = _compat_int_from_byte_vals(bvs, 'big')
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP string.
addr_str = _compat_str(address)
if '/' in addr_str:
raise AddressValueError("Unexpected '/' in %r" % address)
self._ip = self._ip_int_from_string(addr_str)
@property
def packed(self):
"""The binary representation of this address."""
return v6_int_to_packed(self._ip)
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is a multicast address.
See RFC 2373 2.7 for details.
"""
return self in self._constants._multicast_network
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within one of the
reserved IPv6 Network ranges.
"""
return any(self in x for x in self._constants._reserved_networks)
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is reserved per RFC 4291.
"""
return self in self._constants._linklocal_network
@property
def is_site_local(self):
"""Test if the address is reserved for site-local.
Note that the site-local address space has been deprecated by RFC 3879.
Use is_private to test if this address is in the space of unique local
addresses as defined by RFC 4193.
Returns:
A boolean, True if the address is reserved per RFC 3513 2.5.6.
"""
return self in self._constants._sitelocal_network
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv6-special-registry.
"""
return any(self in net for net in self._constants._private_networks)
@property
def is_global(self):
"""Test if this address is allocated for public networks.
Returns:
A boolean, true if the address is not reserved per
iana-ipv6-special-registry.
"""
return not self.is_private
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 2373 2.5.2.
"""
return self._ip == 0
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback address as defined in
RFC 2373 2.5.3.
"""
return self._ip == 1
@property
def ipv4_mapped(self):
"""Return the IPv4 mapped address.
Returns:
If the IPv6 address is a v4 mapped address, return the
IPv4 mapped address. Return None otherwise.
"""
if (self._ip >> 32) != 0xFFFF:
return None
return IPv4Address(self._ip & 0xFFFFFFFF)
@property
def teredo(self):
"""Tuple of embedded teredo IPs.
Returns:
Tuple of the (server, client) IPs or None if the address
doesn't appear to be a Teredo address (isn't within the
2001::/32 prefix)
"""
if (self._ip >> 96) != 0x20010000:
return None
return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
IPv4Address(~self._ip & 0xFFFFFFFF))
@property
def sixtofour(self):
"""Return the IPv4 6to4 embedded address.
Returns:
The IPv4 6to4-embedded address if present or None if the
address doesn't appear to contain a 6to4 embedded address.
"""
if (self._ip >> 112) != 0x2002:
return None
return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
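# Illustrative examples (not part of the original module), using
# commonly cited sample addresses:
#   IPv6Address('2001:0:4136:e378:8000:63bf:3fff:fdd2').teredo
#   # -> (IPv4Address('65.54.227.120'), IPv4Address('192.0.2.45'))
#   IPv6Address('2002:c000:204::').sixtofour
#   # -> IPv4Address('192.0.2.4')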
class IPv6Interface(IPv6Address):
def __init__(self, address):
if isinstance(address, (bytes, _compat_int_types)):
IPv6Address.__init__(self, address)
self.network = IPv6Network(self._ip)
self._prefixlen = self._max_prefixlen
return
if isinstance(address, tuple):
IPv6Address.__init__(self, address[0])
if len(address) > 1:
self._prefixlen = int(address[1])
else:
self._prefixlen = self._max_prefixlen
self.network = IPv6Network(address, strict=False)
self.netmask = self.network.netmask
self.hostmask = self.network.hostmask
return
addr = _split_optional_netmask(address)
IPv6Address.__init__(self, addr[0])
self.network = IPv6Network(address, strict=False)
self.netmask = self.network.netmask
self._prefixlen = self.network._prefixlen
self.hostmask = self.network.hostmask
def __str__(self):
return '%s/%d' % (self._string_from_ip_int(self._ip),
self.network.prefixlen)
def __eq__(self, other):
address_equal = IPv6Address.__eq__(self, other)
if not address_equal or address_equal is NotImplemented:
return address_equal
try:
return self.network == other.network
except AttributeError:
# An interface with an associated network is NOT the
# same as an unassociated address. That's why the hash
# takes the extra info into account.
return False
def __lt__(self, other):
address_less = IPv6Address.__lt__(self, other)
if address_less is NotImplemented:
return NotImplemented
try:
return self.network < other.network
except AttributeError:
# We *do* allow addresses and interfaces to be sorted. The
# unassociated address is considered less than all interfaces.
return False
def __hash__(self):
return self._ip ^ self._prefixlen ^ int(self.network.network_address)
__reduce__ = _IPAddressBase.__reduce__
@property
def ip(self):
return IPv6Address(self._ip)
@property
def with_prefixlen(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.netmask)
@property
def with_hostmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.hostmask)
@property
def is_unspecified(self):
return self._ip == 0 and self.network.is_unspecified
@property
def is_loopback(self):
return self._ip == 1 and self.network.is_loopback
class IPv6Network(_BaseV6, _BaseNetwork):
"""This class represents and manipulates 128-bit IPv6 networks.
Attributes: [examples for IPv6Network('2001:db8::1000/124')]
.network_address: IPv6Address('2001:db8::1000')
.hostmask: IPv6Address('::f')
.broadcast_address: IPv6Address('2001:db8::100f')
.netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0')
.prefixlen: 124
"""
# Class to use when creating address objects
_address_class = IPv6Address
def __init__(self, address, strict=True):
"""Instantiate a new IPv6 Network object.
Args:
address: A string or integer representing the IPv6 network or the
IP and prefix/netmask.
'2001:db8::/128'
'2001:db8:0000:0000:0000:0000:0000:0000/128'
'2001:db8::'
are all functionally the same in IPv6. That is to say,
failing to provide a subnetmask will create an object with
a mask of /128.
Additionally, an integer can be passed, so
IPv6Network('2001:db8::') ==
IPv6Network(42540766411282592856903984951653826560)
or, more generally
IPv6Network(int(IPv6Network('2001:db8::'))) ==
IPv6Network('2001:db8::')
strict: A boolean. If true, ensure that we have been passed
a true network address, e.g. 2001:db8::1000/124, and not an
IP address on a network, e.g. 2001:db8::1/124.
Raises:
AddressValueError: If address isn't a valid IPv6 address.
NetmaskValueError: If the netmask isn't valid for
an IPv6 address.
ValueError: If strict was True and a network address was not
supplied.
"""
_BaseNetwork.__init__(self, address)
# Efficient constructor from integer or packed address
if isinstance(address, (bytes, _compat_int_types)):
self.network_address = IPv6Address(address)
self.netmask, self._prefixlen = self._make_netmask(
self._max_prefixlen)
return
if isinstance(address, tuple):
if len(address) > 1:
arg = address[1]
else:
arg = self._max_prefixlen
self.netmask, self._prefixlen = self._make_netmask(arg)
self.network_address = IPv6Address(address[0])
packed = int(self.network_address)
if packed & int(self.netmask) != packed:
if strict:
raise ValueError('%s has host bits set' % self)
else:
self.network_address = IPv6Address(packed &
int(self.netmask))
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
addr = _split_optional_netmask(address)
self.network_address = IPv6Address(self._ip_int_from_string(addr[0]))
if len(addr) == 2:
arg = addr[1]
else:
arg = self._max_prefixlen
self.netmask, self._prefixlen = self._make_netmask(arg)
if strict:
if (IPv6Address(int(self.network_address) & int(self.netmask)) !=
self.network_address):
raise ValueError('%s has host bits set' % self)
self.network_address = IPv6Address(int(self.network_address) &
int(self.netmask))
if self._prefixlen == (self._max_prefixlen - 1):
self.hosts = self.__iter__
def hosts(self):
"""Generate Iterator over usable hosts in a network.
This is like __iter__ except it doesn't return the
Subnet-Router anycast address.
"""
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in _compat_range(network + 1, broadcast + 1):
yield self._address_class(x)
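# Illustrative example (not part of the original module): for
# IPv6Network('2001:db8::/126'), hosts() yields 2001:db8::1 through
# 2001:db8::3; the Subnet-Router anycast address (2001:db8::) is
# skipped, but the highest address is included.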
@property
def is_site_local(self):
"""Test if the address is reserved for site-local.
Note that the site-local address space has been deprecated by RFC 3879.
Use is_private to test if this address is in the space of unique local
addresses as defined by RFC 4193.
Returns:
A boolean, True if the address is reserved per RFC 3513 2.5.6.
"""
return (self.network_address.is_site_local and
self.broadcast_address.is_site_local)
class _IPv6Constants(object):
_linklocal_network = IPv6Network('fe80::/10')
_multicast_network = IPv6Network('ff00::/8')
_private_networks = [
IPv6Network('::1/128'),
IPv6Network('::/128'),
IPv6Network('::ffff:0:0/96'),
IPv6Network('100::/64'),
IPv6Network('2001::/23'),
IPv6Network('2001:2::/48'),
IPv6Network('2001:db8::/32'),
IPv6Network('2001:10::/28'),
IPv6Network('fc00::/7'),
IPv6Network('fe80::/10'),
]
_reserved_networks = [
IPv6Network('::/8'), IPv6Network('100::/8'),
IPv6Network('200::/7'), IPv6Network('400::/6'),
IPv6Network('800::/5'), IPv6Network('1000::/4'),
IPv6Network('4000::/3'), IPv6Network('6000::/3'),
IPv6Network('8000::/3'), IPv6Network('A000::/3'),
IPv6Network('C000::/3'), IPv6Network('E000::/4'),
IPv6Network('F000::/5'), IPv6Network('F800::/6'),
IPv6Network('FE00::/9'),
]
_sitelocal_network = IPv6Network('fec0::/10')
IPv6Address._constants = _IPv6Constants
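# Illustrative checks against the constants wired up above (a sketch,
# not part of the original module):
#   IPv6Address('fe80::1').is_link_local     # True (fe80::/10)
#   IPv6Address('fc00::1').is_private        # True (fc00::/7)
#   IPv6Address('2001:4860::8888').is_global # True (in no private net)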
|
mit
|
bubichain/blockchain
|
src/3rd/src/jsoncpp/scons-2.1.0/engine/SCons/Script/__init__.py
|
21
|
14158
|
"""SCons.Script
This file implements the main() function used by the scons script.
Architecturally, this *is* the scons script, and will likely only be
called from the external "scons" wrapper. Consequently, anything here
should not be, or be considered, part of the build engine. If it's
something that we expect other software to want to use, it should go in
some other module. If it's specific to the "scons" script invocation,
it goes here.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Script/__init__.py 5357 2011/09/09 21:31:03 bdeegan"
import time
start_time = time.time()
import collections
import os
import sys
# Special chicken-and-egg handling of the "--debug=memoizer" flag:
#
# SCons.Memoize contains a metaclass implementation that affects how
# the other classes are instantiated. The Memoizer may add shim methods
# to classes that have methods that cache computed values in order to
# count and report the hits and misses.
#
# If we wait to enable the Memoization until after we've parsed the
# command line options normally, it will be too late, because the Memoizer
# will have already analyzed the classes that it's Memoizing and decided
# to not add the shims. So we use a special-case, up-front check for
# the "--debug=memoizer" flag and enable Memoizer before we import any
# of the other modules that use it.
_args = sys.argv + os.environ.get('SCONSFLAGS', '').split()
if "--debug=memoizer" in _args:
import SCons.Memoize
import SCons.Warnings
try:
SCons.Memoize.EnableMemoization()
except SCons.Warnings.Warning:
# Some warning was thrown. Arrange for it to be displayed
# or not after warnings are configured.
import Main
exc_type, exc_value, tb = sys.exc_info()
Main.delayed_warnings.append((exc_type, exc_value))
del _args
import SCons.Action
import SCons.Builder
import SCons.Environment
import SCons.Node.FS
import SCons.Options
import SCons.Platform
import SCons.Scanner
import SCons.SConf
import SCons.Subst
import SCons.Tool
import SCons.Util
import SCons.Variables
import SCons.Defaults
import Main
main = Main.main
# The following are global class definitions and variables that used to
# live directly in this module back before 0.96.90, when it contained
# a lot of code. Some SConscript files in widely-distributed packages
# (Blender is the specific example) actually reached into SCons.Script
# directly to use some of these. Rather than break those SConscript
# files, we're going to propagate these names into the SCons.Script
# namespace here.
#
# Some of these are commented out because it's *really* unlikely anyone
# used them, but we're going to leave the comment here to try to make
# it obvious what to do if the situation arises.
BuildTask = Main.BuildTask
CleanTask = Main.CleanTask
QuestionTask = Main.QuestionTask
#PrintHelp = Main.PrintHelp
#SConscriptSettableOptions = Main.SConscriptSettableOptions
AddOption = Main.AddOption
GetOption = Main.GetOption
SetOption = Main.SetOption
Progress = Main.Progress
GetBuildFailures = Main.GetBuildFailures
#keep_going_on_error = Main.keep_going_on_error
#print_dtree = Main.print_dtree
#print_explanations = Main.print_explanations
#print_includes = Main.print_includes
#print_objects = Main.print_objects
#print_time = Main.print_time
#print_tree = Main.print_tree
#memory_stats = Main.memory_stats
#ignore_errors = Main.ignore_errors
#sconscript_time = Main.sconscript_time
#command_time = Main.command_time
#exit_status = Main.exit_status
#profiling = Main.profiling
#repositories = Main.repositories
#
import SConscript
_SConscript = SConscript
call_stack = _SConscript.call_stack
#
Action = SCons.Action.Action
AddMethod = SCons.Util.AddMethod
AllowSubstExceptions = SCons.Subst.SetAllowableExceptions
Builder = SCons.Builder.Builder
Configure = _SConscript.Configure
Environment = SCons.Environment.Environment
#OptParser = SCons.SConsOptions.OptParser
FindPathDirs = SCons.Scanner.FindPathDirs
Platform = SCons.Platform.Platform
Return = _SConscript.Return
Scanner = SCons.Scanner.Base
Tool = SCons.Tool.Tool
WhereIs = SCons.Util.WhereIs
#
BoolVariable = SCons.Variables.BoolVariable
EnumVariable = SCons.Variables.EnumVariable
ListVariable = SCons.Variables.ListVariable
PackageVariable = SCons.Variables.PackageVariable
PathVariable = SCons.Variables.PathVariable
# Deprecated names that will go away some day.
BoolOption = SCons.Options.BoolOption
EnumOption = SCons.Options.EnumOption
ListOption = SCons.Options.ListOption
PackageOption = SCons.Options.PackageOption
PathOption = SCons.Options.PathOption
# Action factories.
Chmod = SCons.Defaults.Chmod
Copy = SCons.Defaults.Copy
Delete = SCons.Defaults.Delete
Mkdir = SCons.Defaults.Mkdir
Move = SCons.Defaults.Move
Touch = SCons.Defaults.Touch
# Pre-made, public scanners.
CScanner = SCons.Tool.CScanner
DScanner = SCons.Tool.DScanner
DirScanner = SCons.Defaults.DirScanner
ProgramScanner = SCons.Tool.ProgramScanner
SourceFileScanner = SCons.Tool.SourceFileScanner
# Functions we might still convert to Environment methods.
CScan = SCons.Defaults.CScan
DefaultEnvironment = SCons.Defaults.DefaultEnvironment
# Other variables we provide.
class TargetList(collections.UserList):
def _do_nothing(self, *args, **kw):
pass
def _add_Default(self, list):
self.extend(list)
def _clear(self):
del self[:]
ARGUMENTS = {}
ARGLIST = []
BUILD_TARGETS = TargetList()
COMMAND_LINE_TARGETS = []
DEFAULT_TARGETS = []
# BUILD_TARGETS can be modified in the SConscript files. If so, we
# want to treat the modified BUILD_TARGETS list as if they specified
# targets on the command line. To do that, though, we need to know if
# BUILD_TARGETS was modified through "official" APIs or by hand. We do
# this by updating two lists in parallel, the documented BUILD_TARGETS
# list, above, and this internal _build_plus_default targets list which
# should only have "official" API changes. Then Script/Main.py can
# compare these two afterwards to figure out if the user added their
# own targets to BUILD_TARGETS.
_build_plus_default = TargetList()
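# Illustrative sketch (not part of the original module): when no
# command-line targets were given, Default('prog') reaches both lists
# via _add_Default(), while a hand-rolled BUILD_TARGETS.append('extra')
# only touches BUILD_TARGETS, so Script/Main.py can detect the manual
# addition by comparing the two lists afterwards.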
def _Add_Arguments(alist):
for arg in alist:
a, b = arg.split('=', 1)
ARGUMENTS[a] = b
ARGLIST.append((a, b))
def _Add_Targets(tlist):
if tlist:
COMMAND_LINE_TARGETS.extend(tlist)
BUILD_TARGETS.extend(tlist)
BUILD_TARGETS._add_Default = BUILD_TARGETS._do_nothing
BUILD_TARGETS._clear = BUILD_TARGETS._do_nothing
_build_plus_default.extend(tlist)
_build_plus_default._add_Default = _build_plus_default._do_nothing
_build_plus_default._clear = _build_plus_default._do_nothing
def _Set_Default_Targets_Has_Been_Called(d, fs):
return DEFAULT_TARGETS
def _Set_Default_Targets_Has_Not_Been_Called(d, fs):
if d is None:
d = [fs.Dir('.')]
return d
_Get_Default_Targets = _Set_Default_Targets_Has_Not_Been_Called
def _Set_Default_Targets(env, tlist):
global DEFAULT_TARGETS
global _Get_Default_Targets
_Get_Default_Targets = _Set_Default_Targets_Has_Been_Called
for t in tlist:
if t is None:
# Delete the elements from the list in-place, don't
# reassign an empty list to DEFAULT_TARGETS, so that the
# variables will still point to the same object we point to.
del DEFAULT_TARGETS[:]
BUILD_TARGETS._clear()
_build_plus_default._clear()
elif isinstance(t, SCons.Node.Node):
DEFAULT_TARGETS.append(t)
BUILD_TARGETS._add_Default([t])
_build_plus_default._add_Default([t])
else:
nodes = env.arg2nodes(t, env.fs.Entry)
DEFAULT_TARGETS.extend(nodes)
BUILD_TARGETS._add_Default(nodes)
_build_plus_default._add_Default(nodes)
#
help_text = None
def HelpFunction(text):
global help_text
if SCons.Script.help_text is None:
SCons.Script.help_text = text
else:
help_text = help_text + text
#
# Will be non-zero if we are reading an SConscript file.
sconscript_reading = 0
#
def Variables(files=[], args=ARGUMENTS):
return SCons.Variables.Variables(files, args)
def Options(files=[], args=ARGUMENTS):
return SCons.Options.Options(files, args)
# The list of global functions to add to the SConscript name space
# that end up calling corresponding methods or Builders in the
# DefaultEnvironment().
GlobalDefaultEnvironmentFunctions = [
# Methods from the SConsEnvironment class, above.
'Default',
'EnsurePythonVersion',
'EnsureSConsVersion',
'Exit',
'Export',
'GetLaunchDir',
'Help',
'Import',
#'SConscript', is handled separately, below.
'SConscriptChdir',
# Methods from the Environment.Base class.
'AddPostAction',
'AddPreAction',
'Alias',
'AlwaysBuild',
'BuildDir',
'CacheDir',
'Clean',
#The Command() method is handled separately, below.
'Decider',
'Depends',
'Dir',
'NoClean',
'NoCache',
'Entry',
'Execute',
'File',
'FindFile',
'FindInstalledFiles',
'FindSourceFiles',
'Flatten',
'GetBuildPath',
'Glob',
'Ignore',
'Install',
'InstallAs',
'Literal',
'Local',
'ParseDepends',
'Precious',
'Repository',
'Requires',
'SConsignFile',
'SideEffect',
'SourceCode',
'SourceSignatures',
'Split',
'Tag',
'TargetSignatures',
'Value',
'VariantDir',
]
GlobalDefaultBuilders = [
# Supported builders.
'CFile',
'CXXFile',
'DVI',
'Jar',
'Java',
'JavaH',
'Library',
'M4',
'MSVSProject',
'Object',
'PCH',
'PDF',
'PostScript',
'Program',
'RES',
'RMIC',
'SharedLibrary',
'SharedObject',
'StaticLibrary',
'StaticObject',
'Tar',
'TypeLibrary',
'Zip',
'Package',
]
for name in GlobalDefaultEnvironmentFunctions + GlobalDefaultBuilders:
exec "%s = _SConscript.DefaultEnvironmentCall(%s)" % (name, repr(name))
del name
# There are a handful of variables that used to live in the
# Script/SConscript.py module that some SConscript files out there were
# accessing directly as SCons.Script.SConscript.*. The problem is that
# "SConscript" in this namespace is no longer a module, it's a global
# function call--or more precisely, an object that implements a global
# function call through the default Environment. Nevertheless, we can
# maintain backwards compatibility for SConscripts that were reaching in
# this way by hanging some attributes off the "SConscript" object here.
SConscript = _SConscript.DefaultEnvironmentCall('SConscript')
# Make SConscript look enough like the module it used to be so
# that pychecker doesn't barf.
SConscript.__name__ = 'SConscript'
SConscript.Arguments = ARGUMENTS
SConscript.ArgList = ARGLIST
SConscript.BuildTargets = BUILD_TARGETS
SConscript.CommandLineTargets = COMMAND_LINE_TARGETS
SConscript.DefaultTargets = DEFAULT_TARGETS
# The global Command() function must be handled differently than the
# global functions for other construction environment methods because
# we want people to be able to use Actions that must expand $TARGET
# and $SOURCE later, when (and if) the Action is invoked to build
# the target(s). We do this with the subst=1 argument, which creates
# a DefaultEnvironmentCall instance that wraps up a normal default
# construction environment that performs variable substitution, not a
# proxy that doesn't.
#
# There's a flaw here, though, because any other $-variables on a command
# line will *also* be expanded, each to a null string, but that should
# only be a problem in the unusual case where someone was passing a '$'
# on a command line and *expected* the $ to get through to the shell
# because they were calling Command() and not env.Command()... This is
# unlikely enough that we're going to leave this as is and cross that
# bridge if someone actually comes to it.
Command = _SConscript.DefaultEnvironmentCall('Command', subst=1)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
apache-2.0
|
abligh/xen
|
tools/python/xen/xend/XendTask.py
|
48
|
7801
|
#===========================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2007 XenSource Ltd
#============================================================================
from xen.xend.XendAPIConstants import XEN_API_TASK_STATUS_TYPE
from xen.xend.XendLogging import log
import thread
import threading
class XendTask(threading.Thread):
"""Represents a Asynchronous Task used by Xen API.
Basically proxies the callable object in a thread and returns the
results via self.{type,result,error_info}.
@cvar task_progress: Thread local storage for progress tracking.
It is a dict indexed by thread_id. Note that the
thread_id may be reused when the previous
thread with the thread_id ends.
@cvar task_progress_lock: lock on thread access to task_progress
"""
# progress stack:
# thread_id : [(start_task, end_task),
# (start_sub_task, end_sub_task)..]
# example: (0, 100), (50, 100), (50, 100) ...
# That stack would mean the task is 75% complete,
# since the innermost frame is 50% of the way through the
# last 50% of the task.
task_progress = {}
task_progress_lock = threading.Lock()
def __init__(self, uuid, func, args, func_name, return_type, label, desc,
session):
"""
@param uuid: UUID of the task
@type uuid: string
@param func: Method to call (from XendAPI)
@type func: callable object
@param args: arguments to pass to function
@type args: list or tuple
@param label: name label of the task.
@type label: string
@param desc: name description of the task.
@type desc: string
@param func_name: function name, e.g. 'VM.start'
@type func_name: string
"""
threading.Thread.__init__(self)
self.status_lock = threading.Lock()
self.status = XEN_API_TASK_STATUS_TYPE[0]
self.progress = 0
self.type = return_type
self.uuid = uuid
self.result = None
self.error_info = []
self.name_label = label or func.__name__
self.name_description = desc
self.thread_id = 0
self.func_name = func_name
self.func = func
self.args = args
self.session = session
def set_status(self, new_status):
self.status_lock.acquire()
try:
self.status = new_status
finally:
self.status_lock.release()
def get_status(self):
self.status_lock.acquire()
try:
return self.status
finally:
self.status_lock.release()
def run(self):
"""Runs the method and stores the result for later access.
Is invoked by threading.Thread.start().
"""
self.thread_id = thread.get_ident()
self.task_progress_lock.acquire()
try:
self.task_progress[self.thread_id] = {}
self.progress = 0
finally:
self.task_progress_lock.release()
try:
result = self.func(*self.args)
if result['Status'] == 'Success':
self.result = result['Value']
self.set_status(XEN_API_TASK_STATUS_TYPE[1])
else:
self.error_info = result['ErrorDescription']
self.set_status(XEN_API_TASK_STATUS_TYPE[2])
except Exception, e:
log.exception('Error running Async Task')
self.error_info = ['INTERNAL ERROR', str(e)]
self.set_status(XEN_API_TASK_STATUS_TYPE[2])
self.task_progress_lock.acquire()
try:
del self.task_progress[self.thread_id]
self.progress = 100
finally:
self.task_progress_lock.release()
def get_record(self):
"""Returns a Xen API compatible record."""
return {
'uuid': self.uuid,
'name_label': self.name_label,
'name_description': self.name_description,
'status': self.status,
'progress': self.get_progress(),
'type': self.type,
'result': self.result,
'error_info': self.error_info,
'allowed_operations': {},
'session': self.session,
}
def get_progress(self):
""" Checks the thread local progress storage. """
if self.status != XEN_API_TASK_STATUS_TYPE[0]:
return 100
self.task_progress_lock.acquire()
try:
# Pop each progress range in the stack and map it on to
# the next progress range until we find out cumulative
# progress based on the (start, end) range of each level
start = 0
prog_stack = self.task_progress.get(self.thread_id, [])[:]
if len(prog_stack) > 0:
start, stop = prog_stack.pop()
while prog_stack:
new_start, new_stop = prog_stack.pop()
start = new_start + ((new_stop - new_start)/100.0 * start)
# only update progress if it increases, this will prevent
# progress from going backwards when tasks are popped off
# the stack
if start > self.progress:
self.progress = int(start)
finally:
self.task_progress_lock.release()
return self.progress
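# Worked example (illustrative, not part of the original module): with
# a stack of [(0, 100), (50, 100), (50, 100)], the loop folds the
# frames as 50 + (50/100.0 * 50) = 75, then 0 + (100/100.0 * 75) = 75,
# so get_progress() reports 75.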
def log_progress(cls, progress_min, progress_max,
func, *args, **kwds):
""" Callable function wrapper that logs the progress of the
function to thread local storage for task progress calculation.
This is a class method so other parts of Xend can update
the task progress by calling:
XendTask.log_progress(progress_min, progress_max,
func, *args, **kwds)
The progress range is recorded in thread local storage,
and the result of func(*args, **kwds) is returned back
to the caller.
"""
thread_id = thread.get_ident()
retval = None
# Log the start of the method
cls.task_progress_lock.acquire()
try:
if type(cls.task_progress.get(thread_id)) != list:
cls.task_progress[thread_id] = []
cls.task_progress[thread_id].append((progress_min,
progress_max))
finally:
cls.task_progress_lock.release()
# Execute the method
retval = func(*args, **kwds)
# Log the end of the method by popping the progress range
# off the stack.
cls.task_progress_lock.acquire()
try:
cls.task_progress[thread_id].pop()
finally:
cls.task_progress_lock.release()
return retval
log_progress = classmethod(log_progress)
|
gpl-2.0
|
tcheehow/MissionPlanner
|
Lib/distutils/command/bdist_dumb.py
|
50
|
5259
|
"""distutils.command.bdist_dumb
Implements the Distutils 'bdist_dumb' command (create a "dumb" built
distribution -- i.e., just an archive to be unpacked under $prefix or
$exec_prefix)."""
__revision__ = "$Id$"
import os
from sysconfig import get_python_version
from distutils.util import get_platform
from distutils.core import Command
from distutils.dir_util import remove_tree, ensure_relative
from distutils.errors import DistutilsPlatformError
from distutils import log
class bdist_dumb (Command):
description = 'create a "dumb" built distribution'
user_options = [('bdist-dir=', 'd',
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_platform()),
('format=', 'f',
"archive format to create (tar, ztar, gztar, zip)"),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('relative', None,
"build the archive using relative paths"
"(default: false)"),
('owner=', 'u',
"Owner name used when creating a tar file"
" [default: current user]"),
('group=', 'g',
"Group name used when creating a tar file"
" [default: current group]"),
]
boolean_options = ['keep-temp', 'skip-build', 'relative']
default_format = { 'posix': 'gztar',
'nt': 'zip',
'os2': 'zip' }
def initialize_options (self):
self.bdist_dir = None
self.plat_name = None
self.format = None
self.keep_temp = 0
self.dist_dir = None
self.skip_build = 0
self.relative = 0
self.owner = None
self.group = None
def finalize_options(self):
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'dumb')
if self.format is None:
try:
self.format = self.default_format[os.name]
except KeyError:
raise DistutilsPlatformError, \
("don't know how to create dumb built distributions " +
"on platform %s") % os.name
self.set_undefined_options('bdist',
('dist_dir', 'dist_dir'),
('plat_name', 'plat_name'))
def run(self):
if not self.skip_build:
self.run_command('build')
install = self.reinitialize_command('install', reinit_subcommands=1)
install.root = self.bdist_dir
install.skip_build = self.skip_build
install.warn_dir = 0
log.info("installing to %s" % self.bdist_dir)
self.run_command('install')
# And make an archive relative to the root of the
# pseudo-installation tree.
archive_basename = "%s.%s" % (self.distribution.get_fullname(),
self.plat_name)
# OS/2 objects to any ":" characters in a filename (such as when
# a timestamp is used in a version) so change them to hyphens.
if os.name == "os2":
archive_basename = archive_basename.replace(":", "-")
pseudoinstall_root = os.path.join(self.dist_dir, archive_basename)
if not self.relative:
archive_root = self.bdist_dir
else:
if (self.distribution.has_ext_modules() and
(install.install_base != install.install_platbase)):
raise DistutilsPlatformError, \
("can't make a dumb built distribution where "
"base and platbase are different (%s, %s)"
% (repr(install.install_base),
repr(install.install_platbase)))
else:
archive_root = os.path.join(self.bdist_dir,
ensure_relative(install.install_base))
# Make the archive
filename = self.make_archive(pseudoinstall_root,
self.format, root_dir=archive_root,
owner=self.owner, group=self.group)
if self.distribution.has_ext_modules():
pyversion = get_python_version()
else:
pyversion = 'any'
self.distribution.dist_files.append(('bdist_dumb', pyversion,
filename))
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
|
gpl-3.0
|
pypingou/mock
|
docs/conf.py
|
7
|
6297
|
# -*- coding: utf-8 -*-
#
# Mock documentation build configuration file, created by
# sphinx-quickstart on Mon Nov 17 18:12:00 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os
sys.path.insert(0, os.path.abspath('..'))
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath('some/directory'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.doctest']
doctest_global_setup = """
import os
import sys
import mock
from mock import * # yeah, I know :-/
import unittest2
import __main__
if os.getcwd() not in sys.path:
sys.path.append(os.getcwd())
# keep a reference to __main__
sys.modules['__main'] = __main__
class ProxyModule(object):
def __init__(self):
self.__dict__ = globals()
sys.modules['__main__'] = ProxyModule()
"""
doctest_global_cleanup = """
sys.modules['__main__'] = sys.modules['__main']
"""
html_theme = 'nature'
html_theme_options = {}
# Add any paths that contain templates here, relative to this directory.
#templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = u'Mock'
copyright = u'2007-2015, Michael Foord & the mock team'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents. Supplied by pbr.
#
# The short X.Y version.
version = None
# The full version, including alpha/beta/rc tags.
release = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used: (Set from pbr)
today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
#html_style = 'adctheme.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Mockdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '12pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'Mock.tex', u'Mock Documentation',
u'Michael Foord', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False
|
bsd-2-clause
|
wtmmac/airflow
|
airflow/operators/hive_stats_operator.py
|
38
|
6082
|
from builtins import str
from builtins import zip
from collections import OrderedDict
import json
import logging
from airflow.utils import AirflowException
from airflow.hooks import PrestoHook, HiveMetastoreHook, MySqlHook
from airflow.models import BaseOperator
from airflow.utils import apply_defaults
class HiveStatsCollectionOperator(BaseOperator):
"""
Gathers partition statistics using a dynamically generated Presto
query and inserts the stats into a MySQL table with a schema along
these lines (column types are indicative; execute() inserts the
fields ds, dttm, table_name, partition_repr, col, metric, value).
Stats overwrite themselves if you rerun the same date/partition.
``
CREATE TABLE hive_stats (
ds VARCHAR(16),
dttm VARCHAR(64),
table_name VARCHAR(500),
partition_repr TEXT,
col VARCHAR(500),
metric VARCHAR(200),
value BIGINT
);
``
:param table: the source table, in the format ``database.table_name``
:type table: str
:param partition: the source partition
:type partition: dict of {col:value}
:param extra_exprs: dict of expression to run against the table where
keys are metric names and values are Presto compatible expressions
:type extra_exprs: dict
:param col_blacklist: list of columns to blacklist, consider
blacklisting blobs, large json columns, ...
:type col_blacklist: list
:param assignment_func: a function that receives a column name and
a type, and returns a dict mapping metric names to Presto expressions.
If None is returned, the global defaults are applied. If an
empty dictionary is returned, no stats are computed for that
column.
:type assignment_func: function
"""
template_fields = ('table', 'partition', 'ds', 'dttm')
ui_color = '#aff7a6'
@apply_defaults
def __init__(
self,
table,
partition,
extra_exprs=None,
col_blacklist=None,
assignment_func=None,
metastore_conn_id='metastore_default',
presto_conn_id='presto_default',
mysql_conn_id='airflow_db',
*args, **kwargs):
super(HiveStatsCollectionOperator, self).__init__(*args, **kwargs)
self.table = table
self.partition = partition
self.extra_exprs = extra_exprs or {}
self.col_blacklist = col_blacklist or {}
self.metastore_conn_id = metastore_conn_id
self.presto_conn_id = presto_conn_id
self.mysql_conn_id = mysql_conn_id
self.assignment_func = assignment_func
self.ds = '{{ ds }}'
self.dttm = '{{ execution_date.isoformat() }}'
def get_default_exprs(self, col, col_type):
if col in self.col_blacklist:
return {}
d = {}
d[(col, 'non_null')] = "COUNT({col})"
if col_type in ['double', 'int', 'bigint', 'float']:
d[(col, 'sum')] = 'SUM({col})'
d[(col, 'min')] = 'MIN({col})'
d[(col, 'max')] = 'MAX({col})'
d[(col, 'avg')] = 'AVG({col})'
elif col_type == 'boolean':
d[(col, 'true')] = 'SUM(CASE WHEN {col} THEN 1 ELSE 0 END)'
d[(col, 'false')] = 'SUM(CASE WHEN NOT {col} THEN 1 ELSE 0 END)'
elif col_type in ['string']:
d[(col, 'len')] = 'SUM(CAST(LENGTH({col}) AS BIGINT))'
d[(col, 'approx_distinct')] = 'APPROX_DISTINCT({col})'
return {k: v.format(col=col) for k, v in d.items()}
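# Illustrative example (not part of the original module): for a column
# ('amount', 'double') the defaults expand to
#   {('amount', 'non_null'): 'COUNT(amount)',
#    ('amount', 'sum'): 'SUM(amount)',
#    ('amount', 'min'): 'MIN(amount)',
#    ('amount', 'max'): 'MAX(amount)',
#    ('amount', 'avg'): 'AVG(amount)'}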
def execute(self, context=None):
metastore = HiveMetastoreHook(metastore_conn_id=self.metastore_conn_id)
table = metastore.get_table(table_name=self.table)
field_types = {col.name: col.type for col in table.sd.cols}
exprs = {
('', 'count'): 'COUNT(*)'
}
for col, col_type in list(field_types.items()):
d = {}
if self.assignment_func:
d = self.assignment_func(col, col_type)
if d is None:
d = self.get_default_exprs(col, col_type)
else:
d = self.get_default_exprs(col, col_type)
exprs.update(d)
exprs.update(self.extra_exprs)
exprs = OrderedDict(exprs)
exprs_str = ",\n ".join([
v + " AS " + k[0] + '__' + k[1]
for k, v in exprs.items()])
where_clause = [
"{0} = '{1}'".format(k, v) for k, v in self.partition.items()]
where_clause = " AND\n ".join(where_clause)
sql = """
SELECT
{exprs_str}
FROM {self.table}
WHERE
{where_clause};
""".format(**locals())
hook = PrestoHook(presto_conn_id=self.presto_conn_id)
logging.info('Executing SQL check: ' + sql)
row = hook.get_first(hql=sql)
logging.info("Record: " + str(row))
if not row:
raise AirflowException("The query returned None")
part_json = json.dumps(self.partition, sort_keys=True)
logging.info("Deleting rows from previous runs if they exist")
mysql = MySqlHook(self.mysql_conn_id)
sql = """
SELECT 1 FROM hive_stats
WHERE
table_name='{self.table}' AND
partition_repr='{part_json}' AND
dttm='{self.dttm}'
LIMIT 1;
""".format(**locals())
if mysql.get_records(sql):
sql = """
DELETE FROM hive_stats
WHERE
table_name='{self.table}' AND
partition_repr='{part_json}' AND
dttm='{self.dttm}';
""".format(**locals())
mysql.run(sql)
logging.info("Pivoting and loading cells into the Airflow db")
rows = [
(self.ds, self.dttm, self.table, part_json) +
(r[0][0], r[0][1], r[1])
for r in zip(exprs, row)]
mysql.insert_rows(
table='hive_stats',
rows=rows,
target_fields=[
'ds',
'dttm',
'table_name',
'partition_repr',
'col',
'metric',
'value',
]
)
|
apache-2.0
|
jcodybaker/snip
|
tests/lib/gtest-1.8.0/test/gtest_help_test.py
|
2968
|
5856
|
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the --help flag of Google C++ Testing Framework.
SYNOPSIS
gtest_help_test.py --build_dir=BUILD/DIR
# where BUILD/DIR contains the built gtest_help_test_ file.
gtest_help_test.py
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
import gtest_test_utils
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_WINDOWS = os.name == 'nt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')
FLAG_PREFIX = '--gtest_'
DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'
STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to'
UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG),
re.sub('^--', '/', LIST_TESTS_FLAG),
re.sub('_', '-', LIST_TESTS_FLAG)]
INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing'
SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess(
[PROGRAM_PATH, LIST_TESTS_FLAG]).output
# The help message must match this regex.
HELP_REGEX = re.compile(
FLAG_PREFIX + r'list_tests.*' +
FLAG_PREFIX + r'filter=.*' +
FLAG_PREFIX + r'also_run_disabled_tests.*' +
FLAG_PREFIX + r'repeat=.*' +
FLAG_PREFIX + r'shuffle.*' +
FLAG_PREFIX + r'random_seed=.*' +
FLAG_PREFIX + r'color=.*' +
FLAG_PREFIX + r'print_time.*' +
FLAG_PREFIX + r'output=.*' +
FLAG_PREFIX + r'break_on_failure.*' +
FLAG_PREFIX + r'throw_on_failure.*' +
FLAG_PREFIX + r'catch_exceptions=0.*',
re.DOTALL)
def RunWithFlag(flag):
"""Runs gtest_help_test_ with the given flag.
Returns:
the exit code and the text output as a tuple.
Args:
flag: the command-line flag to pass to gtest_help_test_, or None.
"""
if flag is None:
command = [PROGRAM_PATH]
else:
command = [PROGRAM_PATH, flag]
child = gtest_test_utils.Subprocess(command)
return child.exit_code, child.output
class GTestHelpTest(gtest_test_utils.TestCase):
"""Tests the --help flag and its equivalent forms."""
def TestHelpFlag(self, flag):
"""Verifies correct behavior when help flag is specified.
The right message must be printed and the tests must be
skipped when the given flag is specified.
Args:
flag: A flag to pass to the binary or None.
"""
exit_code, output = RunWithFlag(flag)
self.assertEquals(0, exit_code)
self.assert_(HELP_REGEX.search(output), output)
if IS_LINUX:
self.assert_(STREAM_RESULT_TO_FLAG in output, output)
else:
self.assert_(STREAM_RESULT_TO_FLAG not in output, output)
if SUPPORTS_DEATH_TESTS and not IS_WINDOWS:
self.assert_(DEATH_TEST_STYLE_FLAG in output, output)
else:
self.assert_(DEATH_TEST_STYLE_FLAG not in output, output)
def TestNonHelpFlag(self, flag):
"""Verifies correct behavior when no help flag is specified.
Verifies that when no help flag is specified, the tests are run
and the help message is not printed.
Args:
flag: A flag to pass to the binary or None.
"""
exit_code, output = RunWithFlag(flag)
self.assert_(exit_code != 0)
self.assert_(not HELP_REGEX.search(output), output)
def testPrintsHelpWithFullFlag(self):
self.TestHelpFlag('--help')
def testPrintsHelpWithShortFlag(self):
self.TestHelpFlag('-h')
def testPrintsHelpWithQuestionFlag(self):
self.TestHelpFlag('-?')
def testPrintsHelpWithWindowsStyleQuestionFlag(self):
self.TestHelpFlag('/?')
def testPrintsHelpWithUnrecognizedGoogleTestFlag(self):
self.TestHelpFlag(UNKNOWN_FLAG)
def testPrintsHelpWithIncorrectFlagStyle(self):
for incorrect_flag in INCORRECT_FLAG_VARIANTS:
self.TestHelpFlag(incorrect_flag)
def testRunsTestsWithoutHelpFlag(self):
"""Verifies that when no help flag is specified, the tests are run
and the help message is not printed."""
self.TestNonHelpFlag(None)
def testRunsTestsWithGtestInternalFlag(self):
"""Verifies that the tests are run and no help message is printed when
a flag starting with Google Test prefix and 'internal_' is supplied."""
self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING)
if __name__ == '__main__':
gtest_test_utils.Main()
|
mit
|
chugunovyar/factoryForBuild
|
env/lib/python2.7/site-packages/flower/events.py
|
16
|
4189
|
from __future__ import absolute_import
from __future__ import with_statement
import time
import shelve
import logging
import threading
import collections
from functools import partial
import celery
from tornado.ioloop import PeriodicCallback
from tornado.ioloop import IOLoop
from celery.events import EventReceiver
from celery.events.state import State
from . import api
try:
from collections import Counter
except ImportError:
from .utils.backports.collections import Counter
logger = logging.getLogger(__name__)
class EventsState(State):
# EventsState object is created and accessed only from ioloop thread
def __init__(self, *args, **kwargs):
super(EventsState, self).__init__(*args, **kwargs)
self.counter = collections.defaultdict(Counter)
def event(self, event):
worker_name = event['hostname']
event_type = event['type']
self.counter[worker_name][event_type] += 1
# Send event to api subscribers (via websockets)
classname = api.events.getClassName(event_type)
cls = getattr(api.events, classname, None)
if cls:
cls.send_message(event)
# Save the event
super(EventsState, self).event(event)
class Events(threading.Thread):
events_enable_interval = 5000
def __init__(self, capp, db=None, persistent=False,
enable_events=True, io_loop=None, **kwargs):
threading.Thread.__init__(self)
self.daemon = True
self.io_loop = io_loop or IOLoop.instance()
self.capp = capp
self.db = db
self.persistent = persistent
self.enable_events = enable_events
self.state = None
if self.persistent and celery.__version__ < '3.0.15':
logger.warning('Persistent mode is available with '
'Celery 3.0.15 and later')
self.persistent = False
if self.persistent:
logger.debug("Loading state from '%s'...", self.db)
state = shelve.open(self.db)
if state:
self.state = state['events']
state.close()
if not self.state:
self.state = EventsState(**kwargs)
self.timer = PeriodicCallback(self.on_enable_events,
self.events_enable_interval)
def start(self):
threading.Thread.start(self)
        # Celery versions prior to 3 don't support enable_events
if self.enable_events and celery.VERSION[0] > 2:
self.timer.start()
def stop(self):
if self.persistent:
logger.debug("Saving state to '%s'...", self.db)
state = shelve.open(self.db)
state['events'] = self.state
state.close()
def run(self):
try_interval = 1
while True:
try:
try_interval *= 2
with self.capp.connection() as conn:
recv = EventReceiver(conn,
handlers={"*": self.on_event},
app=self.capp)
try_interval = 1
recv.capture(limit=None, timeout=None, wakeup=True)
except (KeyboardInterrupt, SystemExit):
try:
import _thread as thread
except ImportError:
import thread
thread.interrupt_main()
except Exception as e:
logger.error("Failed to capture events: '%s', "
"trying again in %s seconds.",
e, try_interval)
logger.debug(e, exc_info=True)
time.sleep(try_interval)
def on_enable_events(self):
# Periodically enable events for workers
# launched after flower
try:
self.capp.control.enable_events()
except Exception as e:
logger.debug("Failed to enable events: '%s'", e)
def on_event(self, event):
# Call EventsState.event in ioloop thread to avoid synchronization
self.io_loop.add_callback(partial(self.state.event, event))
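# A minimal usage sketch (hypothetical, not part of flower): wiring Events into
# a Tornado IOLoop. The broker URL is an assumption for illustration only.
if __name__ == "__main__":
    capp = celery.Celery(broker="amqp://guest@localhost//")
    events = Events(capp, persistent=False)
    events.start()  # capture thread plus the periodic enable_events timer
    IOLoop.instance().start()  # EventsState.event() runs on this loop's thread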
|
gpl-3.0
|
bbenko/shinkicker
|
django/core/management/commands/diffsettings.py
|
411
|
1296
|
from django.core.management.base import NoArgsCommand
def module_to_dict(module, omittable=lambda k: k.startswith('_')):
"Converts a module namespace to a Python dictionary. Used by get_settings_diff."
return dict([(k, repr(v)) for k, v in module.__dict__.items() if not omittable(k)])
class Command(NoArgsCommand):
help = """Displays differences between the current settings.py and Django's
default settings. Settings that don't appear in the defaults are
followed by "###"."""
requires_model_validation = False
def handle_noargs(self, **options):
# Inspired by Postfix's "postconf -n".
from django.conf import settings, global_settings
# Because settings are imported lazily, we need to explicitly load them.
settings._setup()
user_settings = module_to_dict(settings._wrapped)
default_settings = module_to_dict(global_settings)
output = []
keys = user_settings.keys()
keys.sort()
for key in keys:
if key not in default_settings:
output.append("%s = %s ###" % (key, user_settings[key]))
elif user_settings[key] != default_settings[key]:
output.append("%s = %s" % (key, user_settings[key]))
return '\n'.join(output)
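# A minimal sketch (hypothetical, not part of Django): module_to_dict keeps
# only public names and repr()s the values, which is what makes the string
# comparison against global_settings above meaningful.
if __name__ == "__main__":
    import types
    fake = types.ModuleType("fake_settings")
    fake.DEBUG = True
    fake._PRIVATE = "dropped"  # omitted by the default omittable() predicate
    print(module_to_dict(fake))  # {'DEBUG': 'True'}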
|
bsd-3-clause
|
RackSec/ansible
|
lib/ansible/modules/network/lenovo/cnos_vlag.py
|
59
|
13338
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to send VLAG commands to Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_vlag
author: "Dave Kasberg (@dkasberg)"
short_description: Manage VLAG resources and attributes on devices running Lenovo CNOS
description:
- This module allows you to work with virtual Link Aggregation Groups
(vLAG) related configurations. The operators used are overloaded to ensure
control over switch vLAG configurations. Apart from the regular device
connection related attributes, there are four vLAG arguments which are
overloaded variables that will perform further configurations. They are
vlagArg1, vlagArg2, vlagArg3, and vlagArg4. For more details on how to use
these arguments, see [Overloaded Variables].
This module uses SSH to manage network device configuration.
The results of the operation will be placed in a directory named 'results'
    that must be created by the user in the local directory where the playbook is run.
    For more information about this module from Lenovo and customizing its usage for your
use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_vlag.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options:
vlagArg1:
description:
      - This is an overloaded vlag first argument. Usage of this argument can be found in the User Guide referenced above.
required: Yes
default: Null
choices: [enable, auto-recovery,config-consistency,isl,mac-address-table,peer-gateway,priority,startup-delay,tier-id,vrrp,instance,hlthchk]
vlagArg2:
description:
      - This is an overloaded vlag second argument. Usage of this argument can be found in the User Guide referenced above.
required: No
default: Null
choices: [Interval in seconds,disable or strict,Port Aggregation Number,VLAG priority,Delay time in seconds,VLAG tier-id value,
VLAG instance number,keepalive-attempts,keepalive-interval,retry-interval,peer-ip]
vlagArg3:
description:
      - This is an overloaded vlag third argument. Usage of this argument can be found in the User Guide referenced above.
required: No
default: Null
choices: [enable or port-aggregation,Number of keepalive attempts,Interval in seconds,Interval in seconds,VLAG health check peer IP4 address]
vlagArg4:
description:
      - This is an overloaded vlag fourth argument. Usage of this argument can be found in the User Guide referenced above.
required: No
default: Null
choices: [Port Aggregation Number,default or management]
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_vlag. These are written in the main.yml file of the tasks directory.
---
- name: Test Vlag - enable
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "enable"
- name: Test Vlag - autorecovery
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "auto-recovery"
vlagArg2: 266
- name: Test Vlag - config-consistency
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "config-consistency"
vlagArg2: "strict"
- name: Test Vlag - isl
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "isl"
vlagArg2: 23
- name: Test Vlag - mac-address-table
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "mac-address-table"
- name: Test Vlag - peer-gateway
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "peer-gateway"
- name: Test Vlag - priority
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "priority"
vlagArg2: 1313
- name: Test Vlag - startup-delay
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "startup-delay"
vlagArg2: 323
- name: Test Vlag - tier-id
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "tier-id"
vlagArg2: 313
- name: Test Vlag - vrrp
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "vrrp"
- name: Test Vlag - instance
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "instance"
vlagArg2: 33
vlagArg3: 333
- name: Test Vlag - instance2
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "instance"
vlagArg2: "33"
- name: Test Vlag - keepalive-attempts
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "hlthchk"
vlagArg2: "keepalive-attempts"
vlagArg3: 13
- name: Test Vlag - keepalive-interval
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "hlthchk"
vlagArg2: "keepalive-interval"
vlagArg3: 131
- name: Test Vlag - retry-interval
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "hlthchk"
vlagArg2: "retry-interval"
vlagArg3: 133
- name: Test Vlag - peer ip
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "hlthchk"
vlagArg2: "peer-ip"
vlagArg3: "1.2.3.4"
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: string
sample: "vLAG configurations accomplished"
'''
import sys
import paramiko
import time
import argparse
import socket
import array
import json
import re
try:
from ansible.module_utils import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
#
# Define parameters for vlag creation entry
#
module = AnsibleModule(
argument_spec=dict(
outputfile=dict(required=True),
host=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
enablePassword=dict(required=False, no_log=True),
deviceType=dict(required=True),
vlagArg1=dict(required=True),
vlagArg2=dict(required=False),
vlagArg3=dict(required=False),
vlagArg4=dict(required=False),),
supports_check_mode=False)
username = module.params['username']
password = module.params['password']
enablePassword = module.params['enablePassword']
outputfile = module.params['outputfile']
hostIP = module.params['host']
deviceType = module.params['deviceType']
vlagArg1 = module.params['vlagArg1']
vlagArg2 = module.params['vlagArg2']
vlagArg3 = module.params['vlagArg3']
vlagArg4 = module.params['vlagArg4']
output = ""
# Create instance of SSHClient object
remote_conn_pre = paramiko.SSHClient()
# Automatically add untrusted hosts (make sure okay for security policy in
# your environment)
remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# initiate SSH connection with the switch
remote_conn_pre.connect(hostIP, username=username, password=password)
time.sleep(2)
# Use invoke_shell to establish an 'interactive session'
remote_conn = remote_conn_pre.invoke_shell()
time.sleep(2)
# Enable and enter configure terminal then send command
output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
output = output + \
cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
# Make terminal length = 0
output = output + \
cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
# Go to config mode
output = output + \
cnos.waitForDeviceResponse(
"configure d\n", "(config)#", 2, remote_conn)
# Send the CLi command
output = output + cnos.vlagConfig(
remote_conn, deviceType, "(config)#", 2, vlagArg1, vlagArg2, vlagArg3,
vlagArg4)
# Save it into the file
file = open(outputfile, "a")
file.write(output)
file.close()
# need to add logic to check when changes occur or not
errorMsg = cnos.checkOutputForError(output)
    if errorMsg is None:
module.exit_json(changed=True, msg="vlag configurations accomplished")
else:
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
main()
|
gpl-3.0
|
jbbskinny/sympy
|
sympy/series/kauers.py
|
94
|
1807
|
from __future__ import print_function, division
def finite_diff(expression, variable, increment=1):
"""
    Takes as input a polynomial expression and the variable used to construct
    it, and returns the difference between the function's value when its input
    is incremented by 1 and the original function value. If you want an
    increment other than one, supply it as the third argument.
Examples
========
>>> from sympy.abc import x, y, z, k, n
>>> from sympy.series.kauers import finite_diff
>>> from sympy import Sum
>>> finite_diff(x**2, x)
2*x + 1
>>> finite_diff(y**3 + 2*y**2 + 3*y + 4, y)
3*y**2 + 7*y + 6
>>> finite_diff(x**2 + 3*x + 8, x, 2)
4*x + 10
>>> finite_diff(z**3 + 8*z, z, 3)
9*z**2 + 27*z + 51
"""
expression = expression.expand()
expression2 = expression.subs(variable, variable + increment)
expression2 = expression2.expand()
return expression2 - expression
def finite_diff_kauers(sum):
"""
Takes as input a Sum instance and returns the difference between the sum
with the upper index incremented by 1 and the original sum. For example,
if S(n) is a sum, then finite_diff_kauers will return S(n + 1) - S(n).
Examples
========
>>> from sympy.series.kauers import finite_diff_kauers
>>> from sympy import Sum
>>> from sympy.abc import x, y, m, n, k
>>> finite_diff_kauers(Sum(k, (k, 1, n)))
n + 1
>>> finite_diff_kauers(Sum(1/k, (k, 1, n)))
1/(n + 1)
>>> finite_diff_kauers(Sum((x*y**2), (x, 1, n), (y, 1, m)))
(m + 1)**2*(n + 1)
>>> finite_diff_kauers(Sum((x*y), (x, 1, m), (y, 1, n)))
(m + 1)*(n + 1)
"""
function = sum.function
for l in sum.limits:
        function = function.subs(l[0], l[-1] + 1)
return function
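# A minimal sketch (hypothetical, not part of sympy): the forward difference of
# a sum's closed form equals the next summand, the identity behind
# finite_diff_kauers.
def _kauers_demo():
    from sympy import Sum, simplify, symbols
    n, k = symbols('n k', integer=True, positive=True)
    closed_form = n*(n + 1)/2            # closed form of Sum(k, (k, 1, n))
    assert simplify(finite_diff(closed_form, n) - (n + 1)) == 0
    assert finite_diff_kauers(Sum(k, (k, 1, n))) == n + 1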
|
bsd-3-clause
|
smscannell/crazyflie-clients-python
|
lib/cfclient/utils/input/inputinterfaces/__init__.py
|
5
|
3515
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2014 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
Find all the available input interfaces and try to initialize them.
"""
import os
import glob
import logging
from ..inputreaderinterface import InputReaderInterface
__author__ = 'Bitcraze AB'
__all__ = ['InputInterface']
logger = logging.getLogger(__name__)
found_interfaces = [os.path.splitext(os.path.basename(f))[0] for f in
glob.glob(os.path.dirname(__file__) + "/[A-Za-z]*.py")]
if len(found_interfaces) == 0:
found_interfaces = [os.path.splitext(os.path.basename(f))[0] for
f in glob.glob(os.path.dirname(__file__) +
"/[A-Za-z]*.pyc")]
logger.info("Found interfaces: {}".format(found_interfaces))
initialized_interfaces = []
available_interfaces = []
for interface in found_interfaces:
try:
module = __import__(interface, globals(), locals(), [interface], 1)
main_name = getattr(module, "MODULE_MAIN")
initialized_interfaces.append(getattr(module, main_name)())
logger.info("Successfully initialized [{}]".format(interface))
except Exception as e:
logger.info("Could not initialize [{}]: {}".format(interface, e))
def devices():
# Todo: Support rescanning and adding/removing devices
if len(available_interfaces) == 0:
for reader in initialized_interfaces:
devs = reader.devices()
for dev in devs:
available_interfaces.append(InputInterface(
dev["name"], dev["id"], reader))
return available_interfaces
class InputInterface(InputReaderInterface):
def __init__(self, dev_name, dev_id, dev_reader):
super(InputInterface, self).__init__(dev_name, dev_id, dev_reader)
# These devices cannot be mapped and configured
self.supports_mapping = False
# Ask the reader if it wants to limit
# roll/pitch/yaw/thrust for all devices
self.limit_rp = dev_reader.limit_rp
self.limit_thrust = dev_reader.limit_thrust
self.limit_yaw = dev_reader.limit_yaw
def open(self):
self._reader.open(self.id)
def close(self):
self._reader.close(self.id)
def read(self, include_raw=False):
mydata = self._reader.read(self.id)
# Merge interface returned data into InputReader Data Item
for key in list(mydata.keys()):
self.data.set(key, mydata[key])
return self.data
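# A minimal usage sketch (hypothetical, not part of the client). It assumes the
# InputReaderInterface base class exposes the `name` attribute set from
# dev_name; `id` is the same attribute open()/close() use above.
def _list_devices_demo():
    for dev in devices():
        logger.info("available input device: %s (id=%s)", dev.name, dev.id)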
|
gpl-2.0
|
RubikonAlpha/RIOT
|
dist/tools/pyterm/pytermcontroller/pytermcontroller.py
|
100
|
4769
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Philipp Rosenkranz <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import sys, signal, threading
from twisted.internet import reactor
from twisted.internet.protocol import Protocol, Factory
class PubProtocol(Protocol):
def __init__(self, factory):
self.factory = factory
def connectionMade(self):
print("new connection made")
    def connectionLost(self, reason):
        # Drop the disconnected client's transport from the registry.
        self.factory.clients = {key: value for key, value in self.factory.clients.items()
                                if value is not self.transport}
def dataReceived(self, data):
if data.startswith("hostname: "):
remoteHostname = data.split()[1].strip()
print("dataReceived added: " + remoteHostname)
self.factory.clients[remoteHostname] = self.transport
else:
print("received some useless data...")
class PubFactory(Factory):
def __init__(self):
self.clients = dict()
def buildProtocol(self, addr):
return PubProtocol(self)
class ExperimentRunner():
def __init__(self, experiment, testbed):
self.publisher = PubFactory()
self.port = int(testbed.serverPort)
self.experiment = experiment(self.publisher, self)
self.testbed = testbed
def run(self):
signal.signal(signal.SIGINT, self.handle_sigint)
self.experiment.run()
reactor.listenTCP(self.port, self.publisher)
# clean logfiles and start nodes but don't flash nodes
self.testbed.initClean()
reactor.run()
def stop(self):
self.testbed.stop()
reactor.callFromThread(self.safeReactorStop)
def safeReactorStop(self):
if reactor.running:
try:
reactor.stop()
except:
print("tried to shutdown reactor twice!")
def handle_sigint(self, signal, frame):
self.experiment.stop()
self.testbed.stop()
self.stop() # shutdown if experiment didn't already
class Experiment():
def __init__(self, factory, runner):
self.factory = factory
self.runner = runner
self.hostid = dict()
self.sumDelay = 0
def run(self):
print("Running preHook")
self.preHook()
print("Running experiment")
self.start()
def start(self):
raise NotImplementedError("Inherit from Experiment and implement start")
def stop(self):
print("Running postHook")
self.postHook()
self.runner.stop()
def preHook(self):
pass
def postHook(self):
pass
def readHostFile(self, path):
id = 1
with open(path) as f:
for line in f:
self.hostid[line.strip()] = id
id += 1
def sendToHost(self, host=None, cmd=""):
if host in self.factory.clients:
self.factory.clients[host].write(cmd + "\n")
else:
print("sendToHost: no such host known: " + host + " !")
def sendToAll(self, cmd=""):
for host, transport in self.factory.clients.items():
self.sendToHost(host, cmd)
def pauseInSeconds(self, seconds=0):
from time import time, sleep
start = time()
while (time() - start < seconds):
sleep(seconds - (time() - start))
def callLater(self, absoluteDelay = 0.0, function = None):
reactor.callLater(absoluteDelay, function)
def waitAndCall(self, relativeDelay = 0.0, function = None):
self.sumDelay += relativeDelay
self.callLater(self.sumDelay, function)
def clientIterator(self):
return self.factory.clients.items()
def connectionByHostname(self, host=None):
if host in self.factory.clients:
return self.factory.clients[host]
@staticmethod
def sendToConnection(connection, line):
connection.write(line + "\n")
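# A minimal sketch (hypothetical, not part of pyterm): start() is the abstract
# hook above, so a concrete experiment only schedules commands relative to the
# reactor clock. "help" stands in for any pyterm command.
class HelloExperiment(Experiment):
    def start(self):
        self.waitAndCall(5.0, lambda: self.sendToAll("help"))
        self.waitAndCall(10.0, self.stop)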
|
lgpl-2.1
|
dmillington/ansible-modules-core
|
files/template.py
|
11
|
3875
|
# this is a virtual module that is entirely implemented server side
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: template
version_added: historical
short_description: Templates a file out to a remote server.
description:
- Templates are processed by the Jinja2 templating language
(U(http://jinja.pocoo.org/docs/)) - documentation on the template
formatting can be found in the Template Designer Documentation
(U(http://jinja.pocoo.org/docs/templates/)).
- "Six additional variables can be used in templates: C(ansible_managed)
(configurable via the C(defaults) section of C(ansible.cfg)) contains a string
which can be used to describe the template name, host, modification time of the
template file and the owner uid, C(template_host) contains the node name of
the template's machine, C(template_uid) the owner, C(template_path) the
absolute path of the template, C(template_fullpath) is the absolute path of the
template, and C(template_run_date) is the date that the template was rendered. Note that including
a string that uses a date in the template will result in the template being marked 'changed'
each time."
options:
src:
description:
- Path of a Jinja2 formatted template on the Ansible controller. This can be a relative or absolute path.
required: true
dest:
description:
- Location to render the template to on the remote machine.
required: true
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
required: false
choices: [ "yes", "no" ]
default: "no"
force:
description:
- the default is C(yes), which will replace the remote file when contents
are different than the source. If C(no), the file will only be transferred
if the destination does not exist.
required: false
choices: [ "yes", "no" ]
default: "yes"
notes:
- "Since Ansible version 0.9, templates are loaded with C(trim_blocks=True)."
- "Also, you can override jinja2 settings by adding a special header to template file.
i.e. C(#jinja2:variable_start_string:'[%' , variable_end_string:'%]', trim_blocks: False)
which changes the variable interpolation markers to [% var %] instead of {{ var }}.
    This is the best way to prevent evaluation of things that look like, but should not be, Jinja2.
raw/endraw in Jinja2 will not work as you expect because templates in Ansible are recursively evaluated."
author:
- Ansible Core Team
- Michael DeHaan
extends_documentation_fragment:
- files
- validate
'''
EXAMPLES = '''
# Example from Ansible Playbooks
- template:
src: /mytemplates/foo.j2
dest: /etc/file.conf
owner: bin
group: wheel
mode: 0644
# The same example, but using symbolic modes equivalent to 0644
- template:
src: /mytemplates/foo.j2
dest: /etc/file.conf
owner: bin
group: wheel
mode: "u=rw,g=r,o=r"
# Copy a new "sudoers" file into place, after passing validation with visudo
- template:
src: /mine/sudoers
dest: /etc/sudoers
validate: 'visudo -cf %s'
'''
|
gpl-3.0
|
spasovski/zamboni
|
apps/versions/views.py
|
3
|
4674
|
import posixpath
from django import http
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404, redirect
import caching.base as caching
import commonware.log
import jingo
from mobility.decorators import mobile_template
import amo
from amo.urlresolvers import reverse
from amo.utils import HttpResponseSendFile, urlparams
from access import acl
from addons.decorators import addon_view_factory
from addons.models import Addon
from files.models import File
from versions.models import Version
# The version detail page redirects to the version within pagination, so we
# need to enforce the number of versions per page.
PER_PAGE = 30
addon_view = addon_view_factory(Addon.objects.valid)
log = commonware.log.getLogger('z.versions')
@addon_view
@mobile_template('versions/{mobile/}version_list.html')
def version_list(request, addon, template):
qs = (addon.versions.filter(files__status__in=amo.VALID_STATUSES)
.distinct().order_by('-created'))
versions = amo.utils.paginate(request, qs, PER_PAGE)
versions.object_list = list(versions.object_list)
Version.transformer(versions.object_list)
return jingo.render(request, template,
{'addon': addon, 'versions': versions})
@addon_view
def version_detail(request, addon, version_num):
qs = (addon.versions.filter(files__status__in=amo.VALID_STATUSES)
.distinct().order_by('-created'))
# Use cached_with since values_list won't be cached.
f = lambda: _find_version_page(qs, addon, version_num)
return caching.cached_with(qs, f, 'vd:%s:%s' % (addon.id, version_num))
def _find_version_page(qs, addon, version_num):
ids = list(qs.values_list('version', flat=True))
url = reverse('addons.versions', args=[addon.slug])
if version_num in ids:
page = 1 + ids.index(version_num) / PER_PAGE
to = urlparams(url, 'version-%s' % version_num, page=page)
return http.HttpResponseRedirect(to)
else:
raise http.Http404()
@addon_view
def update_info(request, addon, version_num):
qs = addon.versions.filter(version=version_num,
files__status__in=amo.VALID_STATUSES)
if not qs:
raise http.Http404()
serve_xhtml = ('application/xhtml+xml' in
request.META.get('HTTP_ACCEPT', '').lower())
return jingo.render(request, 'versions/update_info.html',
{'version': qs[0], 'serve_xhtml': serve_xhtml},
content_type='application/xhtml+xml')
def update_info_redirect(request, version_id):
version = get_object_or_404(Version, pk=version_id)
return redirect(reverse('addons.versions.update_info',
args=(version.addon.id, version.version)),
permanent=True)
# Should accept junk at the end for filename goodness.
def download_file(request, file_id, type=None):
file = get_object_or_404(File.objects, pk=file_id)
addon = get_object_or_404(Addon.objects, pk=file.version.addon_id)
if addon.is_premium():
raise PermissionDenied
if addon.is_disabled or file.status == amo.STATUS_DISABLED:
if (acl.check_addon_ownership(request, addon, viewer=True,
ignore_disabled=True) or
acl.check_reviewer(request)):
return HttpResponseSendFile(request, file.guarded_file_path,
content_type='application/xp-install')
else:
raise http.Http404()
attachment = (type == 'attachment' or not request.APP.browser)
loc = file.get_mirror(addon, attachment=attachment)
response = http.HttpResponseRedirect(loc)
response['X-Target-Digest'] = file.hash
return response
guard = lambda: Addon.objects.filter(_current_version__isnull=False)
@addon_view_factory(guard)
def download_latest(request, addon, type='xpi', platform=None):
platforms = [amo.PLATFORM_ALL.id]
if platform is not None and int(platform) in amo.PLATFORMS:
platforms.append(int(platform))
files = File.objects.filter(platform__in=platforms,
version=addon._current_version_id)
try:
# If there's a file matching our platform, it'll float to the end.
file = sorted(files, key=lambda f: f.platform_id == platforms[-1])[-1]
except IndexError:
raise http.Http404()
args = [file.id, type] if type else [file.id]
url = posixpath.join(reverse('downloads.file', args=args), file.filename)
if request.GET:
url += '?' + request.GET.urlencode()
return http.HttpResponseRedirect(url)
|
bsd-3-clause
|
AbsentMoniker/ECE463Honors
|
web2py/gluon/contrib/feedparser.py
|
22
|
165632
|
"""Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit https://code.google.com/p/feedparser/ for the latest version
Visit http://packages.python.org/feedparser/ for the latest documentation
Required: Python 2.4 or later
Recommended: iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "5.1.2"
__license__ = """
Copyright (c) 2010-2012 Kurt McKee <[email protected]>
Copyright (c) 2002-2008 Mark Pilgrim
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>",
"Sam Ruby <http://intertwingly.net/>",
"Ade Oshineye <http://blog.oshineye.com/>",
"Martin Pool <http://sourcefrog.net/>",
"Kurt McKee <http://kurtmckee.org/>"]
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +https://code.google.com/p/feedparser/" % __version__
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
# If you want feedparser to automatically run HTML markup through HTML Tidy, set
# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
# or utidylib <http://utidylib.berlios.de/>.
TIDY_MARKUP = 0
# List of Python interfaces for HTML Tidy, in order of preference. Only useful
# if TIDY_MARKUP = 1
PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
# If you want feedparser to automatically resolve all relative URIs, set this
# to 1.
RESOLVE_RELATIVE_URIS = 1
# If you want feedparser to automatically sanitize all potentially unsafe
# HTML content, set this to 1.
SANITIZE_HTML = 1
# If you want feedparser to automatically parse microformat content embedded
# in entry contents, set this to 1
PARSE_MICROFORMATS = 1
# ---------- Python 3 modules (make it work if possible) ----------
try:
import rfc822
except ImportError:
from email import _parseaddr as rfc822
try:
# Python 3.1 introduces bytes.maketrans and simultaneously
# deprecates string.maketrans; use bytes.maketrans if possible
_maketrans = bytes.maketrans
except (NameError, AttributeError):
import string
_maketrans = string.maketrans
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
except ImportError:
base64 = binascii = None
else:
# Python 3.1 deprecates decodestring in favor of decodebytes
_base64decode = getattr(base64, 'decodebytes', base64.decodestring)
# _s2bytes: convert a UTF-8 str to bytes if the interpreter is Python 3
# _l2bytes: convert a list of ints to bytes if the interpreter is Python 3
try:
if bytes is str:
# In Python 2.5 and below, bytes doesn't exist (NameError)
# In Python 2.6 and above, bytes and str are the same type
raise NameError
except NameError:
# Python 2
def _s2bytes(s):
return s
def _l2bytes(l):
return ''.join(map(chr, l))
else:
# Python 3
def _s2bytes(s):
return bytes(s, 'utf8')
def _l2bytes(l):
return bytes(l)
# If you want feedparser to allow all URL schemes, set this to ()
# List culled from Python's urlparse documentation at:
# http://docs.python.org/library/urlparse.html
# as well as from "URI scheme" at Wikipedia:
# https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme
# Many more will likely need to be added!
ACCEPTABLE_URI_SCHEMES = (
'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'magnet',
'mailto', 'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu',
'sftp', 'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet',
'wais',
# Additional common-but-unofficial schemes
'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs',
'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg',
)
#ACCEPTABLE_URI_SCHEMES = ()
# ---------- required modules (should come with any Python distribution) ----------
import cgi
import codecs
import copy
import datetime
import re
import struct
import time
import types
import urllib
import urllib2
import urlparse
import warnings
from htmlentitydefs import name2codepoint, codepoint2name, entitydefs
try:
from io import BytesIO as _StringIO
except ImportError:
try:
from cStringIO import StringIO as _StringIO
except ImportError:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except ImportError:
gzip = None
try:
import zlib
except ImportError:
zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
import xml.sax
from xml.sax.saxutils import escape as _xmlescape
except ImportError:
_XML_AVAILABLE = 0
def _xmlescape(data,entities={}):
        data = data.replace('&', '&amp;')
        data = data.replace('>', '&gt;')
        data = data.replace('<', '&lt;')
for char, entity in entities:
data = data.replace(char, entity)
return data
else:
try:
xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
except xml.sax.SAXReaderNotAvailable:
_XML_AVAILABLE = 0
else:
_XML_AVAILABLE = 1
# sgmllib is not available by default in Python 3; if the end user doesn't have
# it available then we'll lose illformed XML parsing, content sanitizing, and
# microformat support (at least while feedparser depends on BeautifulSoup).
try:
import sgmllib
except ImportError:
# This is probably Python 3, which doesn't include sgmllib anymore
_SGML_AVAILABLE = 0
# Mock sgmllib enough to allow subclassing later on
class sgmllib(object):
class SGMLParser(object):
def goahead(self, i):
pass
def parse_starttag(self, i):
pass
else:
_SGML_AVAILABLE = 1
# sgmllib defines a number of module-level regular expressions that are
# insufficient for the XML parsing feedparser needs. Rather than modify
# the variables directly in sgmllib, they're defined here using the same
# names, and the compiled code objects of several sgmllib.SGMLParser
# methods are copied into _BaseHTMLProcessor so that they execute in
# feedparser's scope instead of sgmllib's scope.
charref = re.compile('&#(\d+|[xX][0-9a-fA-F]+);')
tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)[$]?(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?'
)
# Unfortunately, these must be copied over to prevent NameError exceptions
entityref = sgmllib.entityref
incomplete = sgmllib.incomplete
interesting = sgmllib.interesting
shorttag = sgmllib.shorttag
shorttagopen = sgmllib.shorttagopen
starttagopen = sgmllib.starttagopen
class _EndBracketRegEx:
def __init__(self):
# Overriding the built-in sgmllib.endbracket regex allows the
# parser to find angle brackets embedded in element attributes.
self.endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''')
def search(self, target, index=0):
match = self.endbracket.match(target, index)
if match is not None:
# Returning a new object in the calling thread's context
            # resolves a thread-safety issue.
return EndBracketMatch(match)
return None
class EndBracketMatch:
def __init__(self, match):
self.match = match
def start(self, n):
return self.match.end(n)
endbracket = _EndBracketRegEx()
# iconv_codec provides support for more character encodings.
# It's available from http://cjkpython.i18n.org/
try:
import iconv_codec
except ImportError:
pass
# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
try:
import chardet
except ImportError:
chardet = None
# BeautifulSoup is used to extract microformat content from HTML
# feedparser is tested using BeautifulSoup 3.2.0
# http://www.crummy.com/software/BeautifulSoup/
try:
import BeautifulSoup
except ImportError:
BeautifulSoup = None
PARSE_MICROFORMATS = False
# ---------- don't touch these ----------
class ThingsNobodyCaresAboutButMe(Exception): pass
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
class UndeclaredNamespace(Exception): pass
SUPPORTED_VERSIONS = {'': u'unknown',
'rss090': u'RSS 0.90',
'rss091n': u'RSS 0.91 (Netscape)',
'rss091u': u'RSS 0.91 (Userland)',
'rss092': u'RSS 0.92',
'rss093': u'RSS 0.93',
'rss094': u'RSS 0.94',
'rss20': u'RSS 2.0',
'rss10': u'RSS 1.0',
'rss': u'RSS (unknown version)',
'atom01': u'Atom 0.1',
'atom02': u'Atom 0.2',
'atom03': u'Atom 0.3',
'atom10': u'Atom 1.0',
'atom': u'Atom (unknown version)',
'cdf': u'CDF',
}
class FeedParserDict(dict):
keymap = {'channel': 'feed',
'items': 'entries',
'guid': 'id',
'date': 'updated',
'date_parsed': 'updated_parsed',
'description': ['summary', 'subtitle'],
'description_detail': ['summary_detail', 'subtitle_detail'],
'url': ['href'],
'modified': 'updated',
'modified_parsed': 'updated_parsed',
'issued': 'published',
'issued_parsed': 'published_parsed',
'copyright': 'rights',
'copyright_detail': 'rights_detail',
'tagline': 'subtitle',
'tagline_detail': 'subtitle_detail'}
def __getitem__(self, key):
if key == 'category':
try:
return dict.__getitem__(self, 'tags')[0]['term']
except IndexError:
raise KeyError, "object doesn't have key 'category'"
elif key == 'enclosures':
norel = lambda link: FeedParserDict([(name,value) for (name,value) in link.items() if name!='rel'])
return [norel(link) for link in dict.__getitem__(self, 'links') if link['rel']==u'enclosure']
elif key == 'license':
for link in dict.__getitem__(self, 'links'):
if link['rel']==u'license' and 'href' in link:
return link['href']
elif key == 'updated':
# Temporarily help developers out by keeping the old
# broken behavior that was reported in issue 310.
# This fix was proposed in issue 328.
if not dict.__contains__(self, 'updated') and \
dict.__contains__(self, 'published'):
warnings.warn("To avoid breaking existing software while "
"fixing issue 310, a temporary mapping has been created "
"from `updated` to `published` if `updated` doesn't "
"exist. This fallback will be removed in a future version "
"of feedparser.", DeprecationWarning)
return dict.__getitem__(self, 'published')
return dict.__getitem__(self, 'updated')
elif key == 'updated_parsed':
if not dict.__contains__(self, 'updated_parsed') and \
dict.__contains__(self, 'published_parsed'):
warnings.warn("To avoid breaking existing software while "
"fixing issue 310, a temporary mapping has been created "
"from `updated_parsed` to `published_parsed` if "
"`updated_parsed` doesn't exist. This fallback will be "
"removed in a future version of feedparser.",
DeprecationWarning)
return dict.__getitem__(self, 'published_parsed')
return dict.__getitem__(self, 'updated_parsed')
else:
realkey = self.keymap.get(key, key)
if isinstance(realkey, list):
for k in realkey:
if dict.__contains__(self, k):
return dict.__getitem__(self, k)
elif dict.__contains__(self, realkey):
return dict.__getitem__(self, realkey)
return dict.__getitem__(self, key)
def __contains__(self, key):
if key in ('updated', 'updated_parsed'):
# Temporarily help developers out by keeping the old
# broken behavior that was reported in issue 310.
# This fix was proposed in issue 328.
return dict.__contains__(self, key)
try:
self.__getitem__(key)
except KeyError:
return False
else:
return True
has_key = __contains__
def get(self, key, default=None):
try:
return self.__getitem__(key)
except KeyError:
return default
def __setitem__(self, key, value):
key = self.keymap.get(key, key)
if isinstance(key, list):
key = key[0]
return dict.__setitem__(self, key, value)
def setdefault(self, key, value):
if key not in self:
self[key] = value
return value
return self[key]
def __getattr__(self, key):
# __getattribute__() is called first; this will be called
# only if an attribute was not already found
try:
return self.__getitem__(key)
except KeyError:
raise AttributeError, "object has no attribute '%s'" % key
def __hash__(self):
return id(self)
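# A minimal sketch (hypothetical helper, not part of feedparser): keymap makes
# legacy key names and their canonical replacements interchangeable.
def _keymap_demo():
    d = FeedParserDict()
    d['description'] = u'hello'          # __setitem__ maps 'description' -> 'summary'
    assert d['summary'] == u'hello'
    assert d['description'] == u'hello'  # __getitem__ resolves the alias back
    assert d.description == u'hello'     # attribute access goes through __getattr__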
_cp1252 = {
128: unichr(8364), # euro sign
130: unichr(8218), # single low-9 quotation mark
131: unichr( 402), # latin small letter f with hook
132: unichr(8222), # double low-9 quotation mark
133: unichr(8230), # horizontal ellipsis
134: unichr(8224), # dagger
135: unichr(8225), # double dagger
136: unichr( 710), # modifier letter circumflex accent
137: unichr(8240), # per mille sign
138: unichr( 352), # latin capital letter s with caron
139: unichr(8249), # single left-pointing angle quotation mark
140: unichr( 338), # latin capital ligature oe
142: unichr( 381), # latin capital letter z with caron
145: unichr(8216), # left single quotation mark
146: unichr(8217), # right single quotation mark
147: unichr(8220), # left double quotation mark
148: unichr(8221), # right double quotation mark
149: unichr(8226), # bullet
150: unichr(8211), # en dash
151: unichr(8212), # em dash
152: unichr( 732), # small tilde
153: unichr(8482), # trade mark sign
154: unichr( 353), # latin small letter s with caron
155: unichr(8250), # single right-pointing angle quotation mark
156: unichr( 339), # latin small ligature oe
158: unichr( 382), # latin small letter z with caron
159: unichr( 376), # latin capital letter y with diaeresis
}
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
uri = _urifixer.sub(r'\1\3', uri)
#try:
if not isinstance(uri, unicode):
uri = uri.decode('utf-8', 'ignore')
uri = urlparse.urljoin(base, uri)
if not isinstance(uri, unicode):
return uri.decode('utf-8', 'ignore')
return uri
#except:
# uri = urlparse.urlunparse([urllib.quote(part) for part in urlparse.urlparse(uri)])
# return urlparse.urljoin(base, uri)
class _FeedParserMixin:
namespaces = {
'': '',
'http://backend.userland.com/rss': '',
'http://blogs.law.harvard.edu/tech/rss': '',
'http://purl.org/rss/1.0/': '',
'http://my.netscape.com/rdf/simple/0.9/': '',
'http://example.com/newformat#': '',
'http://example.com/necho': '',
'http://purl.org/echo/': '',
'uri/of/echo/namespace#': '',
'http://purl.org/pie/': '',
'http://purl.org/atom/ns#': '',
'http://www.w3.org/2005/Atom': '',
'http://purl.org/rss/1.0/modules/rss091#': '',
'http://webns.net/mvcb/': 'admin',
'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
'http://media.tangent.org/rss/1.0/': 'audio',
'http://backend.userland.com/blogChannelModule': 'blogChannel',
'http://web.resource.org/cc/': 'cc',
'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
'http://purl.org/rss/1.0/modules/company': 'co',
'http://purl.org/rss/1.0/modules/content/': 'content',
'http://my.theinfo.org/changed/1.0/rss/': 'cp',
'http://purl.org/dc/elements/1.1/': 'dc',
'http://purl.org/dc/terms/': 'dcterms',
'http://purl.org/rss/1.0/modules/email/': 'email',
'http://purl.org/rss/1.0/modules/event/': 'ev',
'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
'http://freshmeat.net/rss/fm/': 'fm',
'http://xmlns.com/foaf/0.1/': 'foaf',
'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
'http://postneo.com/icbm/': 'icbm',
'http://purl.org/rss/1.0/modules/image/': 'image',
'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://purl.org/rss/1.0/modules/link/': 'l',
'http://search.yahoo.com/mrss': 'media',
# Version 1.1.2 of the Media RSS spec added the trailing slash on the namespace
'http://search.yahoo.com/mrss/': 'media',
'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
'http://purl.org/rss/1.0/modules/reference/': 'ref',
'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
'http://purl.org/rss/1.0/modules/search/': 'search',
'http://purl.org/rss/1.0/modules/slash/': 'slash',
'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
'http://hacks.benhammersley.com/rss/streaming/': 'str',
'http://purl.org/rss/1.0/modules/subscription/': 'sub',
'http://purl.org/rss/1.0/modules/syndication/': 'sy',
'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf',
'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
'http://purl.org/rss/1.0/modules/threading/': 'thr',
'http://purl.org/rss/1.0/modules/textinput/': 'ti',
'http://madskills.com/public/xml/rss/module/trackback/': 'trackback',
'http://wellformedweb.org/commentAPI/': 'wfw',
'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
'http://www.w3.org/1999/xhtml': 'xhtml',
'http://www.w3.org/1999/xlink': 'xlink',
'http://www.w3.org/XML/1998/namespace': 'xml',
}
_matchnamespaces = {}
can_be_relative_uri = set(['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo'])
can_contain_relative_uris = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
can_contain_dangerous_markup = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
html_types = [u'text/html', u'application/xhtml+xml']
def __init__(self, baseuri=None, baselang=None, encoding=u'utf-8'):
if not self._matchnamespaces:
for k, v in self.namespaces.items():
self._matchnamespaces[k.lower()] = v
self.feeddata = FeedParserDict() # feed-level data
self.encoding = encoding # character encoding
self.entries = [] # list of entry-level data
self.version = u'' # feed type/version, see SUPPORTED_VERSIONS
self.namespacesInUse = {} # dictionary of namespaces defined by the feed
# the following are used internally to track state;
# this is really out of control and should be refactored
self.infeed = 0
self.inentry = 0
self.incontent = 0
self.intextinput = 0
self.inimage = 0
self.inauthor = 0
self.incontributor = 0
self.inpublisher = 0
self.insource = 0
self.sourcedata = FeedParserDict()
self.contentparams = FeedParserDict()
self._summaryKey = None
self.namespacemap = {}
self.elementstack = []
self.basestack = []
self.langstack = []
self.baseuri = baseuri or u''
self.lang = baselang or None
self.svgOK = 0
self.title_depth = -1
self.depth = 0
if baselang:
self.feeddata['language'] = baselang.replace('_','-')
# A map of the following form:
# {
# object_that_value_is_set_on: {
# property_name: depth_of_node_property_was_extracted_from,
# other_property: depth_of_node_property_was_extracted_from,
# },
# }
self.property_depth_map = {}
def _normalize_attributes(self, kv):
k = kv[0].lower()
v = k in ('rel', 'type') and kv[1].lower() or kv[1]
# the sgml parser doesn't handle entities in attributes, nor
# does it pass the attribute values through as unicode, while
# strict xml parsers do -- account for this difference
if isinstance(self, _LooseFeedParser):
            v = v.replace('&amp;', '&')
if not isinstance(v, unicode):
v = v.decode('utf-8')
return (k, v)
def unknown_starttag(self, tag, attrs):
# increment depth counter
self.depth += 1
# normalize attrs
attrs = map(self._normalize_attributes, attrs)
# track xml:base and xml:lang
attrsD = dict(attrs)
baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
if not isinstance(baseuri, unicode):
baseuri = baseuri.decode(self.encoding, 'ignore')
# ensure that self.baseuri is always an absolute URI that
        # uses a whitelisted URI scheme (e.g. not `javascript:`)
if self.baseuri:
self.baseuri = _makeSafeAbsoluteURI(self.baseuri, baseuri) or self.baseuri
else:
self.baseuri = _urljoin(self.baseuri, baseuri)
lang = attrsD.get('xml:lang', attrsD.get('lang'))
if lang == '':
# xml:lang could be explicitly set to '', we need to capture that
lang = None
elif lang is None:
# if no xml:lang is specified, use parent lang
lang = self.lang
if lang:
if tag in ('feed', 'rss', 'rdf:RDF'):
self.feeddata['language'] = lang.replace('_','-')
self.lang = lang
self.basestack.append(self.baseuri)
self.langstack.append(lang)
# track namespaces
for prefix, uri in attrs:
if prefix.startswith('xmlns:'):
self.trackNamespace(prefix[6:], uri)
elif prefix == 'xmlns':
self.trackNamespace(None, uri)
# track inline content
if self.incontent and not self.contentparams.get('type', u'xml').endswith(u'xml'):
if tag in ('xhtml:div', 'div'):
return # typepad does this 10/2007
# element declared itself as escaped markup, but it isn't really
self.contentparams['type'] = u'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml':
if tag.find(':') <> -1:
prefix, tag = tag.split(':', 1)
namespace = self.namespacesInUse.get(prefix, '')
if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrs.append(('xmlns',namespace))
if tag=='svg' and namespace=='http://www.w3.org/2000/svg':
attrs.append(('xmlns',namespace))
if tag == 'svg':
self.svgOK += 1
return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0)
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
# special hack for better tracking of empty textinput/image elements in illformed feeds
if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
self.intextinput = 0
if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
self.inimage = 0
# call special handler (if defined) or default handler
methodname = '_start_' + prefix + suffix
try:
method = getattr(self, methodname)
return method(attrsD)
except AttributeError:
# Since there's no handler or something has gone wrong we explicitly add the element and its attributes
unknown_tag = prefix + suffix
if len(attrsD) == 0:
                # No attributes so merge it into the enclosing dictionary
return self.push(unknown_tag, 1)
else:
# Has attributes so create it in its own dictionary
context = self._getContext()
context[unknown_tag] = attrsD
def unknown_endtag(self, tag):
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
if suffix == 'svg' and self.svgOK:
self.svgOK -= 1
# call special handler (if defined) or default handler
methodname = '_end_' + prefix + suffix
try:
if self.svgOK:
raise AttributeError()
method = getattr(self, methodname)
method()
except AttributeError:
self.pop(prefix + suffix)
# track inline content
if self.incontent and not self.contentparams.get('type', u'xml').endswith(u'xml'):
# element declared itself as escaped markup, but it isn't really
if tag in ('xhtml:div', 'div'):
return # typepad does this 10/2007
self.contentparams['type'] = u'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml':
tag = tag.split(':')[-1]
self.handle_data('</%s>' % tag, escape=0)
# track xml:base and xml:lang going out of scope
if self.basestack:
self.basestack.pop()
if self.basestack and self.basestack[-1]:
self.baseuri = self.basestack[-1]
if self.langstack:
self.langstack.pop()
if self.langstack: # and (self.langstack[-1] is not None):
self.lang = self.langstack[-1]
self.depth -= 1
def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
if not self.elementstack:
return
ref = ref.lower()
if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
text = '&#%s;' % ref
else:
if ref[0] == 'x':
c = int(ref[1:], 16)
else:
c = int(ref)
text = unichr(c).encode('utf-8')
self.elementstack[-1][2].append(text)
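    # Behaviour sketch (assumed inputs, not executed): handle_charref('160')
    # appends the UTF-8 bytes for U+00A0, while handle_charref('60') appends
    # the literal '&#60;' so markup-significant characters stay escaped.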
def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
if not self.elementstack:
return
if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
text = '&%s;' % ref
elif ref in self.entities:
text = self.entities[ref]
if text.startswith('&#') and text.endswith(';'):
return self.handle_entityref(text)
else:
try:
name2codepoint[ref]
except KeyError:
text = '&%s;' % ref
else:
text = unichr(name2codepoint[ref]).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_data(self, text, escape=1):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
if not self.elementstack:
return
if escape and self.contentparams.get('type') == u'application/xhtml+xml':
text = _xmlescape(text)
self.elementstack[-1][2].append(text)
def handle_comment(self, text):
# called for each comment, e.g. <!-- insert message here -->
pass
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
pass
def handle_decl(self, text):
pass
def parse_declaration(self, i):
# override internal declaration handler to handle CDATA blocks
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1:
# CDATA block began but didn't finish
k = len(self.rawdata)
return k
self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
return k+3
else:
k = self.rawdata.find('>', i)
if k >= 0:
return k+1
else:
                # incomplete declaration with no closing '>' yet; return -1 so
                # the parser waits for more data
return k
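    # Behaviour sketch (assumed input, not executed): with rawdata starting
    # '<![CDATA[a<b]]>' at i=0, the text 'a<b' is escaped and emitted via
    # handle_data, and the index just past ']]>' is returned.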
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text' or contentType == 'plain':
contentType = u'text/plain'
elif contentType == 'html':
contentType = u'text/html'
elif contentType == 'xhtml':
contentType = u'application/xhtml+xml'
return contentType
def trackNamespace(self, prefix, uri):
loweruri = uri.lower()
if not self.version:
if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/'):
self.version = u'rss090'
elif loweruri == 'http://purl.org/rss/1.0/':
self.version = u'rss10'
elif loweruri == 'http://www.w3.org/2005/atom':
self.version = u'atom10'
if loweruri.find(u'backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
uri = u'http://backend.userland.com/rss'
loweruri = uri
if loweruri in self._matchnamespaces:
self.namespacemap[prefix] = self._matchnamespaces[loweruri]
self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
else:
self.namespacesInUse[prefix or ''] = uri
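    # Behaviour sketch (assumed call, not executed): on a fresh parser,
    # trackNamespace(None, 'http://www.w3.org/2005/Atom') sets self.version to
    # u'atom10' because the lowercased URI matches the Atom 1.0 namespace.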
def resolveURI(self, uri):
return _urljoin(self.baseuri or u'', uri)
def decodeEntities(self, element, data):
return data
def strattrs(self, attrs):
        return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'&quot;'})) for t in attrs])
def push(self, element, expectingText):
self.elementstack.append([element, expectingText, []])
def pop(self, element, stripWhitespace=1):
if not self.elementstack:
return
if self.elementstack[-1][0] != element:
return
element, expectingText, pieces = self.elementstack.pop()
if self.version == u'atom10' and self.contentparams.get('type', u'text') == u'application/xhtml+xml':
# remove enclosing child element, but only if it is a <div> and
# only if all the remaining content is nested underneath it.
# This means that the divs would be retained in the following:
# <div>foo</div><div>bar</div>
while pieces and len(pieces)>1 and not pieces[-1].strip():
del pieces[-1]
while pieces and len(pieces)>1 and not pieces[0].strip():
del pieces[0]
if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
depth = 0
for piece in pieces[:-1]:
if piece.startswith('</'):
depth -= 1
if depth == 0:
break
elif piece.startswith('<') and not piece.endswith('/>'):
depth += 1
else:
pieces = pieces[1:-1]
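        # Behaviour sketch (assumed input): pieces == ['<div>', 'foo', '</div>']
        # is stripped to ['foo'], while ['<div>', 'a', '</div>', '<div>', 'b',
        # '</div>'] is kept whole because the first <div> closes before the end.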
# Ensure each piece is a str for Python 3
for (i, v) in enumerate(pieces):
if not isinstance(v, unicode):
pieces[i] = v.decode('utf-8')
output = u''.join(pieces)
if stripWhitespace:
output = output.strip()
if not expectingText:
return output
# decode base64 content
if base64 and self.contentparams.get('base64', 0):
try:
output = _base64decode(output)
except binascii.Error:
pass
except binascii.Incomplete:
pass
except TypeError:
# In Python 3, base64 takes and outputs bytes, not str
# This may not be the most correct way to accomplish this
output = _base64decode(output.encode('utf-8')).decode('utf-8')
# resolve relative URIs
if (element in self.can_be_relative_uri) and output:
output = self.resolveURI(output)
# decode entities within embedded markup
if not self.contentparams.get('base64', 0):
output = self.decodeEntities(element, output)
# some feed formats require consumers to guess
# whether the content is html or plain text
if not self.version.startswith(u'atom') and self.contentparams.get('type') == u'text/plain':
if self.lookslikehtml(output):
self.contentparams['type'] = u'text/html'
# remove temporary cruft from contentparams
try:
del self.contentparams['mode']
except KeyError:
pass
try:
del self.contentparams['base64']
except KeyError:
pass
is_htmlish = self.mapContentType(self.contentparams.get('type', u'text/html')) in self.html_types
# resolve relative URIs within embedded markup
if is_htmlish and RESOLVE_RELATIVE_URIS:
if element in self.can_contain_relative_uris:
output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', u'text/html'))
# parse microformats
# (must do this before sanitizing because some microformats
# rely on elements that we sanitize)
if PARSE_MICROFORMATS and is_htmlish and element in ['content', 'description', 'summary']:
mfresults = _parseMicroformats(output, self.baseuri, self.encoding)
if mfresults:
for tag in mfresults.get('tags', []):
self._addTag(tag['term'], tag['scheme'], tag['label'])
for enclosure in mfresults.get('enclosures', []):
self._start_enclosure(enclosure)
for xfn in mfresults.get('xfn', []):
self._addXFN(xfn['relationships'], xfn['href'], xfn['name'])
vcard = mfresults.get('vcard')
if vcard:
self._getContext()['vcard'] = vcard
# sanitize embedded markup
if is_htmlish and SANITIZE_HTML:
if element in self.can_contain_dangerous_markup:
output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', u'text/html'))
if self.encoding and not isinstance(output, unicode):
output = output.decode(self.encoding, 'ignore')
# address common error where people take data that is already
# utf-8, presume that it is iso-8859-1, and re-encode it.
if self.encoding in (u'utf-8', u'utf-8_INVALID_PYTHON_3') and isinstance(output, unicode):
try:
output = output.encode('iso-8859-1').decode('utf-8')
except (UnicodeEncodeError, UnicodeDecodeError):
pass
# map win-1252 extensions to the proper code points
if isinstance(output, unicode):
output = output.translate(_cp1252)
# categories/tags/keywords/whatever are handled in _end_category
if element == 'category':
return output
if element == 'title' and -1 < self.title_depth <= self.depth:
return output
# store output in appropriate place(s)
if self.inentry and not self.insource:
if element == 'content':
self.entries[-1].setdefault(element, [])
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element].append(contentparams)
elif element == 'link':
if not self.inimage:
# query variables in urls in link elements are improperly
# converted from `?a=1&b=2` to `?a=1&b;=2` as if they're
# unhandled character references. fix this special case.
output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
self.entries[-1][element] = output
if output:
self.entries[-1]['links'][-1]['href'] = output
else:
if element == 'description':
element = 'summary'
old_value_depth = self.property_depth_map.setdefault(self.entries[-1], {}).get(element)
if old_value_depth is None or self.depth <= old_value_depth:
self.property_depth_map[self.entries[-1]][element] = self.depth
self.entries[-1][element] = output
if self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element + '_detail'] = contentparams
elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage):
context = self._getContext()
if element == 'description':
element = 'subtitle'
context[element] = output
if element == 'link':
# fix query variables; see above for the explanation
output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
context[element] = output
context['links'][-1]['href'] = output
elif self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
context[element + '_detail'] = contentparams
return output
def pushContent(self, tag, attrsD, defaultContentType, expectingText):
self.incontent += 1
if self.lang:
self.lang=self.lang.replace('_','-')
self.contentparams = FeedParserDict({
'type': self.mapContentType(attrsD.get('type', defaultContentType)),
'language': self.lang,
'base': self.baseuri})
self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
self.push(tag, expectingText)
def popContent(self, tag):
value = self.pop(tag)
self.incontent -= 1
self.contentparams.clear()
return value
# a number of elements in a number of RSS variants are nominally plain
# text, but this is routinely ignored. This is an attempt to detect
# the most common cases. As false positives often result in silent
# data loss, this function errs on the conservative side.
@staticmethod
def lookslikehtml(s):
# must have a close tag or an entity reference to qualify
if not (re.search(r'</(\w+)>',s) or re.search("&#?\w+;",s)):
return
# all tags must be in a restricted subset of valid HTML tags
if filter(lambda t: t.lower() not in _HTMLSanitizer.acceptable_elements,
re.findall(r'</?(\w+)',s)):
return
# all entities must have been defined as valid HTML entities
if filter(lambda e: e not in entitydefs.keys(), re.findall(r'&(\w+);', s)):
return
return 1
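    # Behaviour sketch (assumed inputs, not executed): lookslikehtml(u'<p>hi</p>')
    # returns 1 (close tag of an acceptable element, no unknown entities), while
    # lookslikehtml(u'a < b') returns None (no close tag or entity reference).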
def _mapToStandardPrefix(self, name):
colonpos = name.find(':')
if colonpos <> -1:
prefix = name[:colonpos]
suffix = name[colonpos+1:]
prefix = self.namespacemap.get(prefix, prefix)
name = prefix + ':' + suffix
return name
def _getAttribute(self, attrsD, name):
return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith(u'text/'):
return 0
if self.contentparams['type'].endswith(u'+xml'):
return 0
if self.contentparams['type'].endswith(u'/xml'):
return 0
return 1
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
def _save(self, key, value, overwrite=False):
context = self._getContext()
if overwrite:
context[key] = value
else:
context.setdefault(key, value)
def _start_rss(self, attrsD):
versionmap = {'0.91': u'rss091u',
'0.92': u'rss092',
'0.93': u'rss093',
'0.94': u'rss094'}
#If we're here then this is an RSS feed.
#If we don't have a version or have a version that starts with something
#other than RSS then there's been a mistake. Correct it.
if not self.version or not self.version.startswith(u'rss'):
attr_version = attrsD.get('version', '')
version = versionmap.get(attr_version)
if version:
self.version = version
elif attr_version.startswith('2.'):
self.version = u'rss20'
else:
self.version = u'rss'
def _start_channel(self, attrsD):
self.infeed = 1
self._cdf_common(attrsD)
def _cdf_common(self, attrsD):
if 'lastmod' in attrsD:
self._start_modified({})
self.elementstack[-1][-1] = attrsD['lastmod']
self._end_modified()
if 'href' in attrsD:
self._start_link({})
self.elementstack[-1][-1] = attrsD['href']
self._end_link()
def _start_feed(self, attrsD):
self.infeed = 1
versionmap = {'0.1': u'atom01',
'0.2': u'atom02',
'0.3': u'atom03'}
if not self.version:
attr_version = attrsD.get('version')
version = versionmap.get(attr_version)
if version:
self.version = version
else:
self.version = u'atom'
def _end_channel(self):
self.infeed = 0
_end_feed = _end_channel
def _start_image(self, attrsD):
context = self._getContext()
if not self.inentry:
context.setdefault('image', FeedParserDict())
self.inimage = 1
self.title_depth = -1
self.push('image', 0)
def _end_image(self):
self.pop('image')
self.inimage = 0
def _start_textinput(self, attrsD):
context = self._getContext()
context.setdefault('textinput', FeedParserDict())
self.intextinput = 1
self.title_depth = -1
self.push('textinput', 0)
_start_textInput = _start_textinput
def _end_textinput(self):
self.pop('textinput')
self.intextinput = 0
_end_textInput = _end_textinput
def _start_author(self, attrsD):
self.inauthor = 1
self.push('author', 1)
# Append a new FeedParserDict when expecting an author
context = self._getContext()
context.setdefault('authors', [])
context['authors'].append(FeedParserDict())
_start_managingeditor = _start_author
_start_dc_author = _start_author
_start_dc_creator = _start_author
_start_itunes_author = _start_author
def _end_author(self):
self.pop('author')
self.inauthor = 0
self._sync_author_detail()
_end_managingeditor = _end_author
_end_dc_author = _end_author
_end_dc_creator = _end_author
_end_itunes_author = _end_author
def _start_itunes_owner(self, attrsD):
self.inpublisher = 1
self.push('publisher', 0)
def _end_itunes_owner(self):
self.pop('publisher')
self.inpublisher = 0
self._sync_author_detail('publisher')
def _start_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('contributor', 0)
def _end_contributor(self):
self.pop('contributor')
self.incontributor = 0
def _start_dc_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('name', 0)
def _end_dc_contributor(self):
self._end_name()
self.incontributor = 0
def _start_name(self, attrsD):
self.push('name', 0)
_start_itunes_name = _start_name
def _end_name(self):
value = self.pop('name')
if self.inpublisher:
self._save_author('name', value, 'publisher')
elif self.inauthor:
self._save_author('name', value)
elif self.incontributor:
self._save_contributor('name', value)
elif self.intextinput:
context = self._getContext()
context['name'] = value
_end_itunes_name = _end_name
def _start_width(self, attrsD):
self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except ValueError:
value = 0
if self.inimage:
context = self._getContext()
context['width'] = value
def _start_height(self, attrsD):
self.push('height', 0)
def _end_height(self):
value = self.pop('height')
try:
value = int(value)
except ValueError:
value = 0
if self.inimage:
context = self._getContext()
context['height'] = value
def _start_url(self, attrsD):
self.push('href', 1)
_start_homepage = _start_url
_start_uri = _start_url
def _end_url(self):
value = self.pop('href')
if self.inauthor:
self._save_author('href', value)
elif self.incontributor:
self._save_contributor('href', value)
_end_homepage = _end_url
_end_uri = _end_url
def _start_email(self, attrsD):
self.push('email', 0)
_start_itunes_email = _start_email
def _end_email(self):
value = self.pop('email')
if self.inpublisher:
self._save_author('email', value, 'publisher')
elif self.inauthor:
self._save_author('email', value)
elif self.incontributor:
self._save_contributor('email', value)
_end_itunes_email = _end_email
def _getContext(self):
if self.insource:
context = self.sourcedata
elif self.inimage and 'image' in self.feeddata:
context = self.feeddata['image']
elif self.intextinput:
context = self.feeddata['textinput']
elif self.inentry:
context = self.entries[-1]
else:
context = self.feeddata
return context
def _save_author(self, key, value, prefix='author'):
context = self._getContext()
context.setdefault(prefix + '_detail', FeedParserDict())
context[prefix + '_detail'][key] = value
self._sync_author_detail()
context.setdefault('authors', [FeedParserDict()])
context['authors'][-1][key] = value
def _save_contributor(self, key, value):
context = self._getContext()
context.setdefault('contributors', [FeedParserDict()])
context['contributors'][-1][key] = value
def _sync_author_detail(self, key='author'):
context = self._getContext()
detail = context.get('%s_detail' % key)
if detail:
name = detail.get('name')
email = detail.get('email')
if name and email:
context[key] = u'%s (%s)' % (name, email)
elif name:
context[key] = name
elif email:
context[key] = email
else:
author, email = context.get(key), None
if not author:
return
emailmatch = re.search(ur'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
if emailmatch:
email = emailmatch.group(0)
# probably a better way to do the following, but it passes all the tests
author = author.replace(email, u'')
author = author.replace(u'()', u'')
author = author.replace(u'<>', u'')
                author = author.replace(u'&lt;&gt;', u'')
author = author.strip()
if author and (author[0] == u'('):
author = author[1:]
if author and (author[-1] == u')'):
author = author[:-1]
author = author.strip()
if author or email:
context.setdefault('%s_detail' % key, FeedParserDict())
if author:
context['%s_detail' % key]['name'] = author
if email:
context['%s_detail' % key]['email'] = email
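    # Behaviour sketch (assumed input, not executed): an author value of
    # u'John Doe (john@example.com)' is split so that the detail dict gets
    # name u'John Doe' and email u'john@example.com'.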
def _start_subtitle(self, attrsD):
self.pushContent('subtitle', attrsD, u'text/plain', 1)
_start_tagline = _start_subtitle
_start_itunes_subtitle = _start_subtitle
def _end_subtitle(self):
self.popContent('subtitle')
_end_tagline = _end_subtitle
_end_itunes_subtitle = _end_subtitle
def _start_rights(self, attrsD):
self.pushContent('rights', attrsD, u'text/plain', 1)
_start_dc_rights = _start_rights
_start_copyright = _start_rights
def _end_rights(self):
self.popContent('rights')
_end_dc_rights = _end_rights
_end_copyright = _end_rights
def _start_item(self, attrsD):
self.entries.append(FeedParserDict())
self.push('item', 0)
self.inentry = 1
self.guidislink = 0
self.title_depth = -1
id = self._getAttribute(attrsD, 'rdf:about')
if id:
context = self._getContext()
context['id'] = id
self._cdf_common(attrsD)
_start_entry = _start_item
def _end_item(self):
self.pop('item')
self.inentry = 0
_end_entry = _end_item
def _start_dc_language(self, attrsD):
self.push('language', 1)
_start_language = _start_dc_language
def _end_dc_language(self):
self.lang = self.pop('language')
_end_language = _end_dc_language
def _start_dc_publisher(self, attrsD):
self.push('publisher', 1)
_start_webmaster = _start_dc_publisher
def _end_dc_publisher(self):
self.pop('publisher')
self._sync_author_detail('publisher')
_end_webmaster = _end_dc_publisher
def _start_published(self, attrsD):
self.push('published', 1)
_start_dcterms_issued = _start_published
_start_issued = _start_published
_start_pubdate = _start_published
def _end_published(self):
value = self.pop('published')
self._save('published_parsed', _parse_date(value), overwrite=True)
_end_dcterms_issued = _end_published
_end_issued = _end_published
_end_pubdate = _end_published
def _start_updated(self, attrsD):
self.push('updated', 1)
_start_modified = _start_updated
_start_dcterms_modified = _start_updated
_start_dc_date = _start_updated
_start_lastbuilddate = _start_updated
def _end_updated(self):
value = self.pop('updated')
parsed_value = _parse_date(value)
self._save('updated_parsed', parsed_value, overwrite=True)
_end_modified = _end_updated
_end_dcterms_modified = _end_updated
_end_dc_date = _end_updated
_end_lastbuilddate = _end_updated
def _start_created(self, attrsD):
self.push('created', 1)
_start_dcterms_created = _start_created
def _end_created(self):
value = self.pop('created')
self._save('created_parsed', _parse_date(value), overwrite=True)
_end_dcterms_created = _end_created
def _start_expirationdate(self, attrsD):
self.push('expired', 1)
def _end_expirationdate(self):
self._save('expired_parsed', _parse_date(self.pop('expired')), overwrite=True)
def _start_cc_license(self, attrsD):
context = self._getContext()
value = self._getAttribute(attrsD, 'rdf:resource')
attrsD = FeedParserDict()
attrsD['rel'] = u'license'
if value:
attrsD['href']=value
context.setdefault('links', []).append(attrsD)
def _start_creativecommons_license(self, attrsD):
self.push('license', 1)
_start_creativeCommons_license = _start_creativecommons_license
def _end_creativecommons_license(self):
value = self.pop('license')
context = self._getContext()
attrsD = FeedParserDict()
attrsD['rel'] = u'license'
if value:
attrsD['href'] = value
context.setdefault('links', []).append(attrsD)
del context['license']
_end_creativeCommons_license = _end_creativecommons_license
def _addXFN(self, relationships, href, name):
context = self._getContext()
xfn = context.setdefault('xfn', [])
value = FeedParserDict({'relationships': relationships, 'href': href, 'name': name})
if value not in xfn:
xfn.append(value)
def _addTag(self, term, scheme, label):
context = self._getContext()
tags = context.setdefault('tags', [])
if (not term) and (not scheme) and (not label):
return
value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
if value not in tags:
tags.append(value)
def _start_category(self, attrsD):
term = attrsD.get('term')
scheme = attrsD.get('scheme', attrsD.get('domain'))
label = attrsD.get('label')
self._addTag(term, scheme, label)
self.push('category', 1)
_start_dc_subject = _start_category
_start_keywords = _start_category
def _start_media_category(self, attrsD):
attrsD.setdefault('scheme', u'http://search.yahoo.com/mrss/category_schema')
self._start_category(attrsD)
def _end_itunes_keywords(self):
for term in self.pop('itunes_keywords').split(','):
if term.strip():
self._addTag(term.strip(), u'http://www.itunes.com/', None)
def _start_itunes_category(self, attrsD):
self._addTag(attrsD.get('text'), u'http://www.itunes.com/', None)
self.push('category', 1)
def _end_category(self):
value = self.pop('category')
if not value:
return
context = self._getContext()
tags = context['tags']
if value and len(tags) and not tags[-1]['term']:
tags[-1]['term'] = value
else:
self._addTag(value, None, None)
_end_dc_subject = _end_category
_end_keywords = _end_category
_end_itunes_category = _end_category
_end_media_category = _end_category
def _start_cloud(self, attrsD):
self._getContext()['cloud'] = FeedParserDict(attrsD)
def _start_link(self, attrsD):
attrsD.setdefault('rel', u'alternate')
if attrsD['rel'] == u'self':
attrsD.setdefault('type', u'application/atom+xml')
else:
attrsD.setdefault('type', u'text/html')
context = self._getContext()
attrsD = self._itsAnHrefDamnIt(attrsD)
if 'href' in attrsD:
attrsD['href'] = self.resolveURI(attrsD['href'])
expectingText = self.infeed or self.inentry or self.insource
context.setdefault('links', [])
if not (self.inentry and self.inimage):
context['links'].append(FeedParserDict(attrsD))
if 'href' in attrsD:
expectingText = 0
if (attrsD.get('rel') == u'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
context['link'] = attrsD['href']
else:
self.push('link', expectingText)
def _end_link(self):
value = self.pop('link')
def _start_guid(self, attrsD):
self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
self.push('id', 1)
_start_id = _start_guid
def _end_guid(self):
value = self.pop('id')
self._save('guidislink', self.guidislink and 'link' not in self._getContext())
if self.guidislink:
# guid acts as link, but only if 'ispermalink' is not present or is 'true',
# and only if the item doesn't already have a link element
self._save('link', value)
_end_id = _end_guid
def _start_title(self, attrsD):
if self.svgOK:
return self.unknown_starttag('title', attrsD.items())
self.pushContent('title', attrsD, u'text/plain', self.infeed or self.inentry or self.insource)
_start_dc_title = _start_title
_start_media_title = _start_title
def _end_title(self):
if self.svgOK:
return
value = self.popContent('title')
if not value:
return
self.title_depth = self.depth
_end_dc_title = _end_title
def _end_media_title(self):
title_depth = self.title_depth
self._end_title()
self.title_depth = title_depth
def _start_description(self, attrsD):
context = self._getContext()
if 'summary' in context:
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self.pushContent('description', attrsD, u'text/html', self.infeed or self.inentry or self.insource)
_start_dc_description = _start_description
def _start_abstract(self, attrsD):
self.pushContent('description', attrsD, u'text/plain', self.infeed or self.inentry or self.insource)
def _end_description(self):
if self._summaryKey == 'content':
self._end_content()
else:
value = self.popContent('description')
self._summaryKey = None
_end_abstract = _end_description
_end_dc_description = _end_description
def _start_info(self, attrsD):
self.pushContent('info', attrsD, u'text/plain', 1)
_start_feedburner_browserfriendly = _start_info
def _end_info(self):
self.popContent('info')
_end_feedburner_browserfriendly = _end_info
def _start_generator(self, attrsD):
if attrsD:
attrsD = self._itsAnHrefDamnIt(attrsD)
if 'href' in attrsD:
attrsD['href'] = self.resolveURI(attrsD['href'])
self._getContext()['generator_detail'] = FeedParserDict(attrsD)
self.push('generator', 1)
def _end_generator(self):
value = self.pop('generator')
context = self._getContext()
if 'generator_detail' in context:
context['generator_detail']['name'] = value
def _start_admin_generatoragent(self, attrsD):
self.push('generator', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('generator')
self._getContext()['generator_detail'] = FeedParserDict({'href': value})
def _start_admin_errorreportsto(self, attrsD):
self.push('errorreportsto', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('errorreportsto')
def _start_summary(self, attrsD):
context = self._getContext()
if 'summary' in context:
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self._summaryKey = 'summary'
self.pushContent(self._summaryKey, attrsD, u'text/plain', 1)
_start_itunes_summary = _start_summary
def _end_summary(self):
if self._summaryKey == 'content':
self._end_content()
else:
self.popContent(self._summaryKey or 'summary')
self._summaryKey = None
_end_itunes_summary = _end_summary
def _start_enclosure(self, attrsD):
attrsD = self._itsAnHrefDamnIt(attrsD)
context = self._getContext()
attrsD['rel'] = u'enclosure'
context.setdefault('links', []).append(FeedParserDict(attrsD))
def _start_source(self, attrsD):
if 'url' in attrsD:
# This means that we're processing a source element from an RSS 2.0 feed
self.sourcedata['href'] = attrsD[u'url']
self.push('source', 1)
self.insource = 1
self.title_depth = -1
def _end_source(self):
self.insource = 0
value = self.pop('source')
if value:
self.sourcedata['title'] = value
self._getContext()['source'] = copy.deepcopy(self.sourcedata)
self.sourcedata.clear()
def _start_content(self, attrsD):
self.pushContent('content', attrsD, u'text/plain', 1)
src = attrsD.get('src')
if src:
self.contentparams['src'] = src
self.push('content', 1)
def _start_body(self, attrsD):
self.pushContent('content', attrsD, u'application/xhtml+xml', 1)
_start_xhtml_body = _start_body
def _start_content_encoded(self, attrsD):
self.pushContent('content', attrsD, u'text/html', 1)
_start_fullitem = _start_content_encoded
def _end_content(self):
copyToSummary = self.mapContentType(self.contentparams.get('type')) in ([u'text/plain'] + self.html_types)
value = self.popContent('content')
if copyToSummary:
self._save('summary', value)
_end_body = _end_content
_end_xhtml_body = _end_content
_end_content_encoded = _end_content
_end_fullitem = _end_content
def _start_itunes_image(self, attrsD):
self.push('itunes_image', 0)
if attrsD.get('href'):
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
elif attrsD.get('url'):
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('url')})
_start_itunes_link = _start_itunes_image
def _end_itunes_block(self):
value = self.pop('itunes_block', 0)
self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
def _end_itunes_explicit(self):
value = self.pop('itunes_explicit', 0)
# Convert 'yes' -> True, 'clean' to False, and any other value to None
# False and None both evaluate as False, so the difference can be ignored
# by applications that only need to know if the content is explicit.
self._getContext()['itunes_explicit'] = (None, False, True)[(value == 'yes' and 2) or value == 'clean' or 0]
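        # Index arithmetic sketch: 'yes' evaluates to index 2 (True), 'clean'
        # to index 1 (False), and any other value to index 0 (None).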
def _start_media_content(self, attrsD):
context = self._getContext()
context.setdefault('media_content', [])
context['media_content'].append(attrsD)
def _start_media_thumbnail(self, attrsD):
context = self._getContext()
context.setdefault('media_thumbnail', [])
self.push('url', 1) # new
context['media_thumbnail'].append(attrsD)
def _end_media_thumbnail(self):
url = self.pop('url')
context = self._getContext()
        if url is not None and url.strip():
if 'url' not in context['media_thumbnail'][-1]:
context['media_thumbnail'][-1]['url'] = url
def _start_media_player(self, attrsD):
self.push('media_player', 0)
self._getContext()['media_player'] = FeedParserDict(attrsD)
def _end_media_player(self):
value = self.pop('media_player')
context = self._getContext()
context['media_player']['content'] = value
def _start_newlocation(self, attrsD):
self.push('newlocation', 1)
def _end_newlocation(self):
url = self.pop('newlocation')
context = self._getContext()
# don't set newlocation if the context isn't right
if context is not self.feeddata:
return
context['newlocation'] = _makeSafeAbsoluteURI(self.baseuri, url.strip())
if _XML_AVAILABLE:
class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
def __init__(self, baseuri, baselang, encoding):
xml.sax.handler.ContentHandler.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
self.bozo = 0
self.exc = None
self.decls = {}
def startPrefixMapping(self, prefix, uri):
if not uri:
return
# Jython uses '' instead of None; standardize on None
prefix = prefix or None
self.trackNamespace(prefix, uri)
if prefix and uri == 'http://www.w3.org/1999/xlink':
self.decls['xmlns:' + prefix] = uri
def startElementNS(self, name, qname, attrs):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if lowernamespace.find(u'backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
namespace = u'http://backend.userland.com/rss'
lowernamespace = namespace
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = None
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and givenprefix not in self.namespacesInUse:
raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
localname = str(localname).lower()
# qname implementation is horribly broken in Python 2.1 (it
# doesn't report any), and slightly broken in Python 2.2 (it
# doesn't report the xml: namespace). So we match up namespaces
# with a known list first, and then possibly override them with
# the qnames the SAX parser gives us (if indeed it gives us any
# at all). Thanks to MatejC for helping me test this and
# tirelessly telling me that it didn't work yet.
attrsD, self.decls = self.decls, {}
if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrsD['xmlns']=namespace
if localname=='svg' and namespace=='http://www.w3.org/2000/svg':
attrsD['xmlns']=namespace
if prefix:
localname = prefix.lower() + ':' + localname
elif namespace and not qname: #Expat
for name,value in self.namespacesInUse.items():
if name and value == namespace:
localname = name + ':' + localname
break
for (namespace, attrlocalname), attrvalue in attrs.items():
lowernamespace = (namespace or '').lower()
prefix = self._matchnamespaces.get(lowernamespace, '')
if prefix:
attrlocalname = prefix + ':' + attrlocalname
attrsD[str(attrlocalname).lower()] = attrvalue
for qname in attrs.getQNames():
attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
self.unknown_starttag(localname, attrsD.items())
def characters(self, text):
self.handle_data(text)
def endElementNS(self, name, qname):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = ''
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if prefix:
localname = prefix + ':' + localname
elif namespace and not qname: #Expat
for name,value in self.namespacesInUse.items():
if name and value == namespace:
localname = name + ':' + localname
break
localname = str(localname).lower()
self.unknown_endtag(localname)
def error(self, exc):
self.bozo = 1
self.exc = exc
# drv_libxml2 calls warning() in some cases
warning = error
def fatalError(self, exc):
self.error(exc)
raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
special = re.compile('''[<>'"]''')
bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
elements_no_end_tag = set([
'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame',
'hr', 'img', 'input', 'isindex', 'keygen', 'link', 'meta', 'param',
'source', 'track', 'wbr'
])
def __init__(self, encoding, _type):
self.encoding = encoding
self._type = _type
sgmllib.SGMLParser.__init__(self)
def reset(self):
self.pieces = []
sgmllib.SGMLParser.reset(self)
def _shorttag_replace(self, match):
tag = match.group(1)
if tag in self.elements_no_end_tag:
return '<' + tag + ' />'
else:
return '<' + tag + '></' + tag + '>'
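    # Behaviour sketch (assumed matches, not executed): a shorttag for 'br'
    # becomes '<br />' (void element), while one for 'span' becomes
    # '<span></span>' because span requires an explicit end tag.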
# By declaring these methods and overriding their compiled code
# with the code from sgmllib, the original code will execute in
# feedparser's scope instead of sgmllib's. This means that the
# `tagfind` and `charref` regular expressions will be found as
# they're declared above, not as they're declared in sgmllib.
def goahead(self, i):
pass
goahead.func_code = sgmllib.SGMLParser.goahead.func_code
def __parse_starttag(self, i):
pass
__parse_starttag.func_code = sgmllib.SGMLParser.parse_starttag.func_code
def parse_starttag(self,i):
j = self.__parse_starttag(i)
if self._type == 'application/xhtml+xml':
if j>2 and self.rawdata[j-2:j]=='/>':
self.unknown_endtag(self.lasttag)
return j
def feed(self, data):
data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'<!\1', data)
data = re.sub(r'<([^<>\s]+?)\s*/>', self._shorttag_replace, data)
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
try:
bytes
if bytes is str:
raise NameError
self.encoding = self.encoding + u'_INVALID_PYTHON_3'
except NameError:
if self.encoding and isinstance(data, unicode):
data = data.encode(self.encoding)
sgmllib.SGMLParser.feed(self, data)
sgmllib.SGMLParser.close(self)
def normalize_attrs(self, attrs):
if not attrs:
return attrs
# utility method to be called by descendants
attrs = dict([(k.lower(), v) for k, v in attrs]).items()
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
attrs.sort()
return attrs
def unknown_starttag(self, tag, attrs):
# called for each start tag
# attrs is a list of (attr, value) tuples
# e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
uattrs = []
strattrs=''
if attrs:
for key, value in attrs:
                value=value.replace('>','&gt;').replace('<','&lt;').replace('"','&quot;')
                value = self.bare_ampersand.sub("&amp;", value)
# thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
if not isinstance(value, unicode):
value = value.decode(self.encoding, 'ignore')
try:
# Currently, in Python 3 the key is already a str, and cannot be decoded again
uattrs.append((unicode(key, self.encoding), value))
except TypeError:
uattrs.append((key, value))
strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs])
if self.encoding:
try:
strattrs = strattrs.encode(self.encoding)
except (UnicodeEncodeError, LookupError):
pass
if tag in self.elements_no_end_tag:
self.pieces.append('<%s%s />' % (tag, strattrs))
else:
self.pieces.append('<%s%s>' % (tag, strattrs))
def unknown_endtag(self, tag):
# called for each end tag, e.g. for </pre>, tag will be 'pre'
# Reconstruct the original end tag.
if tag not in self.elements_no_end_tag:
self.pieces.append("</%s>" % tag)
def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
# Reconstruct the original character reference.
if ref.startswith('x'):
value = int(ref[1:], 16)
else:
value = int(ref)
if value in _cp1252:
self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:])
else:
self.pieces.append('&#%s;' % ref)
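    # Behaviour sketch (assumed mapping, not executed): ref '146' is a cp1252
    # right single quote, so it is rewritten as '&#x2019;'; a plain ref such as
    # '65' passes through unchanged as '&#65;'.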
def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
# Reconstruct the original entity reference.
if ref in name2codepoint or ref == 'apos':
self.pieces.append('&%s;' % ref)
else:
            self.pieces.append('&amp;%s' % ref)
def handle_data(self, text):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
# Store the original text verbatim.
self.pieces.append(text)
def handle_comment(self, text):
# called for each HTML comment, e.g. <!-- insert Javascript code here -->
# Reconstruct the original comment.
self.pieces.append('<!--%s-->' % text)
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
# Reconstruct original processing instruction.
self.pieces.append('<?%s>' % text)
def handle_decl(self, text):
# called for the DOCTYPE, if present, e.g.
# <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
# "http://www.w3.org/TR/html4/loose.dtd">
# Reconstruct original DOCTYPE
self.pieces.append('<!%s>' % text)
_new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
def _scan_name(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
if i == n:
return None, -1
m = self._new_declname_match(rawdata, i)
if m:
s = m.group()
name = s.strip()
if (i + len(s)) == n:
return None, -1 # end of buffer
return name.lower(), m.end()
else:
self.handle_data(rawdata)
# self.updatepos(declstartpos, i)
return None, -1
def convert_charref(self, name):
return '&#%s;' % name
def convert_entityref(self, name):
return '&%s;' % name
def output(self):
'''Return processed HTML as a single string'''
return ''.join([str(p) for p in self.pieces])
def parse_declaration(self, i):
try:
return sgmllib.SGMLParser.parse_declaration(self, i)
except sgmllib.SGMLParseError:
# escape the doctype declaration and continue parsing
self.handle_data('<')
return i+1
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
def __init__(self, baseuri, baselang, encoding, entities):
sgmllib.SGMLParser.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
_BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml')
self.entities=entities
def decodeEntities(self, element, data):
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#x3C;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#x3E;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        if not self.contentparams.get('type', u'xml').endswith(u'xml'):
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
return data
def strattrs(self, attrs):
        return ''.join([' %s="%s"' % (n,v.replace('"','&quot;')) for n,v in attrs])
class _MicroformatsParser:
STRING = 1
DATE = 2
URI = 3
NODE = 4
EMAIL = 5
known_xfn_relationships = set(['contact', 'acquaintance', 'friend', 'met', 'co-worker', 'coworker', 'colleague', 'co-resident', 'coresident', 'neighbor', 'child', 'parent', 'sibling', 'brother', 'sister', 'spouse', 'wife', 'husband', 'kin', 'relative', 'muse', 'crush', 'date', 'sweetheart', 'me'])
known_binary_extensions = set(['zip','rar','exe','gz','tar','tgz','tbz2','bz2','z','7z','dmg','img','sit','sitx','hqx','deb','rpm','bz2','jar','rar','iso','bin','msi','mp2','mp3','ogg','ogm','mp4','m4v','m4a','avi','wma','wmv'])
def __init__(self, data, baseuri, encoding):
self.document = BeautifulSoup.BeautifulSoup(data)
self.baseuri = baseuri
self.encoding = encoding
if isinstance(data, unicode):
data = data.encode(encoding)
self.tags = []
self.enclosures = []
self.xfn = []
self.vcard = None
def vcardEscape(self, s):
if isinstance(s, basestring):
s = s.replace(',', '\\,').replace(';', '\\;').replace('\n', '\\n')
return s
def vcardFold(self, s):
s = re.sub(';+$', '', s)
sFolded = ''
iMax = 75
sPrefix = ''
while len(s) > iMax:
sFolded += sPrefix + s[:iMax] + '\n'
s = s[iMax:]
sPrefix = ' '
iMax = 74
sFolded += sPrefix + s
return sFolded
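    # Folding sketch (assumed input): an 80-character property line is emitted
    # as a 75-character first line plus a continuation line prefixed with a
    # single space, per RFC 2426 line folding.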
def normalize(self, s):
return re.sub(r'\s+', ' ', s).strip()
def unique(self, aList):
results = []
for element in aList:
if element not in results:
results.append(element)
return results
def toISO8601(self, dt):
return time.strftime('%Y-%m-%dT%H:%M:%SZ', dt)
def getPropertyValue(self, elmRoot, sProperty, iPropertyType=4, bAllowMultiple=0, bAutoEscape=0):
all = lambda x: 1
sProperty = sProperty.lower()
bFound = 0
bNormalize = 1
propertyMatch = {'class': re.compile(r'\b%s\b' % sProperty)}
if bAllowMultiple and (iPropertyType != self.NODE):
snapResults = []
containers = elmRoot(['ul', 'ol'], propertyMatch)
for container in containers:
snapResults.extend(container('li'))
bFound = (len(snapResults) != 0)
if not bFound:
snapResults = elmRoot(all, propertyMatch)
bFound = (len(snapResults) != 0)
if (not bFound) and (sProperty == 'value'):
snapResults = elmRoot('pre')
bFound = (len(snapResults) != 0)
bNormalize = not bFound
if not bFound:
snapResults = [elmRoot]
bFound = (len(snapResults) != 0)
arFilter = []
if sProperty == 'vcard':
snapFilter = elmRoot(all, propertyMatch)
for node in snapFilter:
if node.findParent(all, propertyMatch):
arFilter.append(node)
arResults = []
for node in snapResults:
if node not in arFilter:
arResults.append(node)
bFound = (len(arResults) != 0)
if not bFound:
if bAllowMultiple:
return []
elif iPropertyType == self.STRING:
return ''
elif iPropertyType == self.DATE:
return None
elif iPropertyType == self.URI:
return ''
elif iPropertyType == self.NODE:
return None
else:
return None
arValues = []
for elmResult in arResults:
sValue = None
if iPropertyType == self.NODE:
if bAllowMultiple:
arValues.append(elmResult)
continue
else:
return elmResult
sNodeName = elmResult.name.lower()
if (iPropertyType == self.EMAIL) and (sNodeName == 'a'):
sValue = (elmResult.get('href') or '').split('mailto:').pop().split('?')[0]
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if (not sValue) and (sNodeName == 'abbr'):
sValue = elmResult.get('title')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if (not sValue) and (iPropertyType == self.URI):
if sNodeName == 'a':
sValue = elmResult.get('href')
elif sNodeName == 'img':
sValue = elmResult.get('src')
elif sNodeName == 'object':
sValue = elmResult.get('data')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if (not sValue) and (sNodeName == 'img'):
sValue = elmResult.get('alt')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if not sValue:
sValue = elmResult.renderContents()
sValue = re.sub(r'<\S[^>]*>', '', sValue)
sValue = sValue.replace('\r\n', '\n')
sValue = sValue.replace('\r', '\n')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if not sValue:
continue
if iPropertyType == self.DATE:
sValue = _parse_date_iso8601(sValue)
if bAllowMultiple:
arValues.append(bAutoEscape and self.vcardEscape(sValue) or sValue)
else:
return bAutoEscape and self.vcardEscape(sValue) or sValue
return arValues
def findVCards(self, elmRoot, bAgentParsing=0):
sVCards = ''
if not bAgentParsing:
arCards = self.getPropertyValue(elmRoot, 'vcard', bAllowMultiple=1)
else:
arCards = [elmRoot]
for elmCard in arCards:
arLines = []
def processSingleString(sProperty):
sValue = self.getPropertyValue(elmCard, sProperty, self.STRING, bAutoEscape=1).decode(self.encoding)
if sValue:
arLines.append(self.vcardFold(sProperty.upper() + ':' + sValue))
return sValue or u''
def processSingleURI(sProperty):
sValue = self.getPropertyValue(elmCard, sProperty, self.URI)
if sValue:
sContentType = ''
sEncoding = ''
sValueKey = ''
if sValue.startswith('data:'):
sEncoding = ';ENCODING=b'
sContentType = sValue.split(';')[0].split('/').pop()
sValue = sValue.split(',', 1).pop()
else:
elmValue = self.getPropertyValue(elmCard, sProperty)
if elmValue:
if sProperty != 'url':
sValueKey = ';VALUE=uri'
sContentType = elmValue.get('type', '').strip().split('/').pop().strip()
sContentType = sContentType.upper()
if sContentType == 'OCTET-STREAM':
sContentType = ''
if sContentType:
sContentType = ';TYPE=' + sContentType.upper()
arLines.append(self.vcardFold(sProperty.upper() + sEncoding + sContentType + sValueKey + ':' + sValue))
def processTypeValue(sProperty, arDefaultType, arForceType=None):
arResults = self.getPropertyValue(elmCard, sProperty, bAllowMultiple=1)
for elmResult in arResults:
arType = self.getPropertyValue(elmResult, 'type', self.STRING, 1, 1)
if arForceType:
arType = self.unique(arForceType + arType)
if not arType:
arType = arDefaultType
sValue = self.getPropertyValue(elmResult, 'value', self.EMAIL, 0)
if sValue:
arLines.append(self.vcardFold(sProperty.upper() + ';TYPE=' + ','.join(arType) + ':' + sValue))
# AGENT
# must do this before all other properties because it is destructive
# (removes nested class="vcard" nodes so they don't interfere with
# this vcard's other properties)
arAgent = self.getPropertyValue(elmCard, 'agent', bAllowMultiple=1)
for elmAgent in arAgent:
if re.compile(r'\bvcard\b').search(elmAgent.get('class')):
sAgentValue = self.findVCards(elmAgent, 1) + '\n'
sAgentValue = sAgentValue.replace('\n', '\\n')
sAgentValue = sAgentValue.replace(';', '\\;')
if sAgentValue:
arLines.append(self.vcardFold('AGENT:' + sAgentValue))
# Completely remove the agent element from the parse tree
elmAgent.extract()
else:
                    sAgentValue = self.getPropertyValue(elmAgent, 'value', self.URI, bAutoEscape=1)
if sAgentValue:
arLines.append(self.vcardFold('AGENT;VALUE=uri:' + sAgentValue))
# FN (full name)
sFN = processSingleString('fn')
# N (name)
elmName = self.getPropertyValue(elmCard, 'n')
if elmName:
sFamilyName = self.getPropertyValue(elmName, 'family-name', self.STRING, bAutoEscape=1)
sGivenName = self.getPropertyValue(elmName, 'given-name', self.STRING, bAutoEscape=1)
arAdditionalNames = self.getPropertyValue(elmName, 'additional-name', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'additional-names', self.STRING, 1, 1)
arHonorificPrefixes = self.getPropertyValue(elmName, 'honorific-prefix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-prefixes', self.STRING, 1, 1)
arHonorificSuffixes = self.getPropertyValue(elmName, 'honorific-suffix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-suffixes', self.STRING, 1, 1)
arLines.append(self.vcardFold('N:' + sFamilyName + ';' +
sGivenName + ';' +
','.join(arAdditionalNames) + ';' +
','.join(arHonorificPrefixes) + ';' +
','.join(arHonorificSuffixes)))
elif sFN:
# implied "N" optimization
# http://microformats.org/wiki/hcard#Implied_.22N.22_Optimization
arNames = self.normalize(sFN).split()
if len(arNames) == 2:
bFamilyNameFirst = (arNames[0].endswith(',') or
len(arNames[1]) == 1 or
((len(arNames[1]) == 2) and (arNames[1].endswith('.'))))
if bFamilyNameFirst:
arLines.append(self.vcardFold('N:' + arNames[0] + ';' + arNames[1]))
else:
arLines.append(self.vcardFold('N:' + arNames[1] + ';' + arNames[0]))
# SORT-STRING
sSortString = self.getPropertyValue(elmCard, 'sort-string', self.STRING, bAutoEscape=1)
if sSortString:
arLines.append(self.vcardFold('SORT-STRING:' + sSortString))
# NICKNAME
arNickname = self.getPropertyValue(elmCard, 'nickname', self.STRING, 1, 1)
if arNickname:
arLines.append(self.vcardFold('NICKNAME:' + ','.join(arNickname)))
# PHOTO
processSingleURI('photo')
# BDAY
dtBday = self.getPropertyValue(elmCard, 'bday', self.DATE)
if dtBday:
arLines.append(self.vcardFold('BDAY:' + self.toISO8601(dtBday)))
# ADR (address)
arAdr = self.getPropertyValue(elmCard, 'adr', bAllowMultiple=1)
for elmAdr in arAdr:
arType = self.getPropertyValue(elmAdr, 'type', self.STRING, 1, 1)
if not arType:
arType = ['intl','postal','parcel','work'] # default adr types, see RFC 2426 section 3.2.1
sPostOfficeBox = self.getPropertyValue(elmAdr, 'post-office-box', self.STRING, 0, 1)
sExtendedAddress = self.getPropertyValue(elmAdr, 'extended-address', self.STRING, 0, 1)
sStreetAddress = self.getPropertyValue(elmAdr, 'street-address', self.STRING, 0, 1)
sLocality = self.getPropertyValue(elmAdr, 'locality', self.STRING, 0, 1)
sRegion = self.getPropertyValue(elmAdr, 'region', self.STRING, 0, 1)
sPostalCode = self.getPropertyValue(elmAdr, 'postal-code', self.STRING, 0, 1)
sCountryName = self.getPropertyValue(elmAdr, 'country-name', self.STRING, 0, 1)
arLines.append(self.vcardFold('ADR;TYPE=' + ','.join(arType) + ':' +
sPostOfficeBox + ';' +
sExtendedAddress + ';' +
sStreetAddress + ';' +
sLocality + ';' +
sRegion + ';' +
sPostalCode + ';' +
sCountryName))
# LABEL
processTypeValue('label', ['intl','postal','parcel','work'])
# TEL (phone number)
processTypeValue('tel', ['voice'])
# EMAIL
processTypeValue('email', ['internet'], ['internet'])
# MAILER
processSingleString('mailer')
# TZ (timezone)
processSingleString('tz')
# GEO (geographical information)
elmGeo = self.getPropertyValue(elmCard, 'geo')
if elmGeo:
sLatitude = self.getPropertyValue(elmGeo, 'latitude', self.STRING, 0, 1)
sLongitude = self.getPropertyValue(elmGeo, 'longitude', self.STRING, 0, 1)
arLines.append(self.vcardFold('GEO:' + sLatitude + ';' + sLongitude))
# TITLE
processSingleString('title')
# ROLE
processSingleString('role')
# LOGO
processSingleURI('logo')
# ORG (organization)
elmOrg = self.getPropertyValue(elmCard, 'org')
if elmOrg:
sOrganizationName = self.getPropertyValue(elmOrg, 'organization-name', self.STRING, 0, 1)
if not sOrganizationName:
# implied "organization-name" optimization
# http://microformats.org/wiki/hcard#Implied_.22organization-name.22_Optimization
sOrganizationName = self.getPropertyValue(elmCard, 'org', self.STRING, 0, 1)
if sOrganizationName:
arLines.append(self.vcardFold('ORG:' + sOrganizationName))
else:
arOrganizationUnit = self.getPropertyValue(elmOrg, 'organization-unit', self.STRING, 1, 1)
arLines.append(self.vcardFold('ORG:' + sOrganizationName + ';' + ';'.join(arOrganizationUnit)))
# CATEGORY
arCategory = self.getPropertyValue(elmCard, 'category', self.STRING, 1, 1) + self.getPropertyValue(elmCard, 'categories', self.STRING, 1, 1)
if arCategory:
arLines.append(self.vcardFold('CATEGORIES:' + ','.join(arCategory)))
# NOTE
processSingleString('note')
# REV
processSingleString('rev')
# SOUND
processSingleURI('sound')
# UID
processSingleString('uid')
# URL
processSingleURI('url')
# CLASS
processSingleString('class')
# KEY
processSingleURI('key')
if arLines:
arLines = [u'BEGIN:vCard',u'VERSION:3.0'] + arLines + [u'END:vCard']
# XXX - this is super ugly; properly fix this with issue 148
for i, s in enumerate(arLines):
if not isinstance(s, unicode):
arLines[i] = s.decode('utf-8', 'ignore')
sVCards += u'\n'.join(arLines) + u'\n'
return sVCards.strip()
def isProbablyDownloadable(self, elm):
attrsD = elm.attrMap
if 'href' not in attrsD:
return 0
linktype = attrsD.get('type', '').strip()
if linktype.startswith('audio/') or \
linktype.startswith('video/') or \
(linktype.startswith('application/') and not linktype.endswith('xml')):
return 1
path = urlparse.urlparse(attrsD['href'])[2]
if path.find('.') == -1:
return 0
fileext = path.split('.').pop().lower()
return fileext in self.known_binary_extensions
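    # Behaviour sketch (assumed attributes, not executed): a link with
    # type='audio/mpeg' is probably downloadable, as is one whose href path
    # ends in '.mp3'; an href ending in '.html' is not.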
def findTags(self):
all = lambda x: 1
for elm in self.document(all, {'rel': re.compile(r'\btag\b')}):
href = elm.get('href')
if not href:
continue
urlscheme, domain, path, params, query, fragment = \
urlparse.urlparse(_urljoin(self.baseuri, href))
segments = path.split('/')
tag = segments.pop()
if not tag:
if segments:
tag = segments.pop()
else:
# there are no tags
continue
tagscheme = urlparse.urlunparse((urlscheme, domain, '/'.join(segments), '', '', ''))
if not tagscheme.endswith('/'):
tagscheme += '/'
self.tags.append(FeedParserDict({"term": tag, "scheme": tagscheme, "label": elm.string or ''}))
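    # Behaviour sketch (assumed markup, not executed): for
    # <a rel="tag" href="http://example.com/tags/python">, the term is
    # 'python' and the scheme is 'http://example.com/tags/'.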
def findEnclosures(self):
all = lambda x: 1
enclosure_match = re.compile(r'\benclosure\b')
for elm in self.document(all, {'href': re.compile(r'.+')}):
if not enclosure_match.search(elm.get('rel', u'')) and not self.isProbablyDownloadable(elm):
continue
if elm.attrMap not in self.enclosures:
self.enclosures.append(elm.attrMap)
if elm.string and not elm.get('title'):
self.enclosures[-1]['title'] = elm.string
def findXFN(self):
all = lambda x: 1
for elm in self.document(all, {'rel': re.compile('.+'), 'href': re.compile('.+')}):
rels = elm.get('rel', u'').split()
xfn_rels = [r for r in rels if r in self.known_xfn_relationships]
if xfn_rels:
self.xfn.append({"relationships": xfn_rels, "href": elm.get('href', ''), "name": elm.string})
def _parseMicroformats(htmlSource, baseURI, encoding):
if not BeautifulSoup:
return
try:
p = _MicroformatsParser(htmlSource, baseURI, encoding)
except UnicodeEncodeError:
# sgmllib throws this exception when performing lookups of tags
# with non-ASCII characters in them.
return
p.vcard = p.findVCards(p.document)
p.findTags()
p.findEnclosures()
p.findXFN()
return {"tags": p.tags, "enclosures": p.enclosures, "xfn": p.xfn, "vcard": p.vcard}
class _RelativeURIResolver(_BaseHTMLProcessor):
relative_uris = set([('a', 'href'),
('applet', 'codebase'),
('area', 'href'),
('blockquote', 'cite'),
('body', 'background'),
('del', 'cite'),
('form', 'action'),
('frame', 'longdesc'),
('frame', 'src'),
('iframe', 'longdesc'),
('iframe', 'src'),
('head', 'profile'),
('img', 'longdesc'),
('img', 'src'),
('img', 'usemap'),
('input', 'src'),
('input', 'usemap'),
('ins', 'cite'),
('link', 'href'),
('object', 'classid'),
('object', 'codebase'),
('object', 'data'),
('object', 'usemap'),
('q', 'cite'),
('script', 'src')])
def __init__(self, baseuri, encoding, _type):
_BaseHTMLProcessor.__init__(self, encoding, _type)
self.baseuri = baseuri
def resolveURI(self, uri):
return _makeSafeAbsoluteURI(self.baseuri, uri.strip())
def unknown_starttag(self, tag, attrs):
attrs = self.normalize_attrs(attrs)
attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
_BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def _resolveRelativeURIs(htmlSource, baseURI, encoding, _type):
if not _SGML_AVAILABLE:
return htmlSource
p = _RelativeURIResolver(baseURI, encoding, _type)
p.feed(htmlSource)
return p.output()
def _makeSafeAbsoluteURI(base, rel=None):
# bail if ACCEPTABLE_URI_SCHEMES is empty
if not ACCEPTABLE_URI_SCHEMES:
try:
return _urljoin(base, rel or u'')
except ValueError:
return u''
if not base:
return rel or u''
if not rel:
try:
scheme = urlparse.urlparse(base)[0]
except ValueError:
return u''
if not scheme or scheme in ACCEPTABLE_URI_SCHEMES:
return base
return u''
try:
uri = _urljoin(base, rel)
except ValueError:
return u''
if uri.strip().split(':', 1)[0] not in ACCEPTABLE_URI_SCHEMES:
return u''
return uri
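# Behaviour sketch (assumed values, not executed):
# _makeSafeAbsoluteURI(u'http://a.example/', u'/x') -> u'http://a.example/x',
# while _makeSafeAbsoluteURI(u'http://a.example/', u'javascript:alert(1)')
# returns u'' because 'javascript' is not an acceptable URI scheme.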
class _HTMLSanitizer(_BaseHTMLProcessor):
acceptable_elements = set(['a', 'abbr', 'acronym', 'address', 'area',
'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript'])
acceptable_attributes = set(['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
'background', 'balance', 'bgcolor', 'bgproperties', 'border',
'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols',
'colspan', 'compact', 'contenteditable', 'controls', 'coords', 'data',
'datafld', 'datapagesize', 'datasrc', 'datetime', 'default', 'delay',
'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for',
'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus',
'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode',
'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc',
'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max',
'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref',
'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size',
'prompt', 'pqg', 'radiogroup', 'readonly', 'rel', 'repeat-max',
'repeat-min', 'replace', 'required', 'rev', 'rightspacing', 'rows',
'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src',
'start', 'step', 'summary', 'suppress', 'tabindex', 'target', 'template',
'title', 'toppadding', 'type', 'unselectable', 'usemap', 'urn', 'valign',
'value', 'variable', 'volume', 'vspace', 'vrml', 'width', 'wrap',
'xml:lang'])
unacceptable_elements_with_end_tag = set(['script', 'applet', 'style'])
acceptable_css_properties = set(['azimuth', 'background-color',
'border-bottom-color', 'border-collapse', 'border-color',
'border-left-color', 'border-right-color', 'border-top-color', 'clear',
'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
'white-space', 'width'])
# survey of common keywords found in feeds
acceptable_css_keywords = set(['auto', 'aqua', 'black', 'block', 'blue',
'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
'transparent', 'underline', 'white', 'yellow'])
valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
'\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')
mathml_elements = set(['annotation', 'annotation-xml', 'maction', 'math',
'merror', 'mfenced', 'mfrac', 'mi', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded',
'mphantom', 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle',
'msub', 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
'munderover', 'none', 'semantics'])
mathml_attributes = set(['actiontype', 'align', 'close', 'columnalign',
'columnlines', 'columnspacing', 'columnspan', 'depth', 'display',
'displaystyle', 'encoding', 'equalcolumns', 'equalrows', 'fence',
'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace',
'mathbackground', 'mathcolor', 'mathvariant', 'maxsize', 'minsize', 'open',
'other', 'rowalign', 'rowlines', 'rowspacing', 'rowspan', 'rspace',
'scriptlevel', 'selection', 'separator', 'separators', 'stretchy', 'width',
'xlink:href', 'xlink:show', 'xlink:type', 'xmlns', 'xmlns:xlink'])
# svgtiny - foreignObject + linearGradient + radialGradient + stop
svg_elements = set(['a', 'animate', 'animateColor', 'animateMotion',
'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'foreignObject',
'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath',
'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop',
'svg', 'switch', 'text', 'title', 'tspan', 'use'])
# svgtiny + class + opacity + offset + xmlns + xmlns:xlink
svg_attributes = set(['accent-height', 'accumulate', 'additive', 'alphabetic',
'arabic-form', 'ascent', 'attributeName', 'attributeType',
'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx',
'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity',
'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style',
'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2',
'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x',
'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines',
'keyTimes', 'lang', 'mathematical', 'marker-end', 'marker-mid',
'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'max',
'min', 'name', 'offset', 'opacity', 'orient', 'origin',
'overline-position', 'overline-thickness', 'panose-1', 'path',
'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY',
'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures',
'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv',
'stop-color', 'stop-opacity', 'strikethrough-position',
'strikethrough-thickness', 'stroke', 'stroke-dasharray',
'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
'stroke-miterlimit', 'stroke-opacity', 'stroke-width', 'systemLanguage',
'target', 'text-anchor', 'to', 'transform', 'type', 'u1', 'u2',
'underline-position', 'underline-thickness', 'unicode', 'unicode-range',
'units-per-em', 'values', 'version', 'viewBox', 'visibility', 'width',
'widths', 'x', 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1',
'y2', 'zoomAndPan'])
svg_attr_map = None
svg_elem_map = None
acceptable_svg_properties = set([ 'fill', 'fill-opacity', 'fill-rule',
'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
'stroke-opacity'])
def reset(self):
_BaseHTMLProcessor.reset(self)
self.unacceptablestack = 0
self.mathmlOK = 0
self.svgOK = 0
def unknown_starttag(self, tag, attrs):
acceptable_attributes = self.acceptable_attributes
keymap = {}
if not tag in self.acceptable_elements or self.svgOK:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack += 1
# add implicit namespaces to html5 inline svg/mathml
if self._type.endswith('html'):
if not dict(attrs).get('xmlns'):
if tag=='svg':
attrs.append( ('xmlns','http://www.w3.org/2000/svg') )
if tag=='math':
attrs.append( ('xmlns','http://www.w3.org/1998/Math/MathML') )
# not otherwise acceptable, perhaps it is MathML or SVG?
if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs:
self.mathmlOK += 1
if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs:
self.svgOK += 1
# choose acceptable attributes based on tag class, else bail
if self.mathmlOK and tag in self.mathml_elements:
acceptable_attributes = self.mathml_attributes
elif self.svgOK and tag in self.svg_elements:
# for most vocabularies, lowercasing is a good idea. Many
# svg elements, however, are camel case
if not self.svg_attr_map:
lower=[attr.lower() for attr in self.svg_attributes]
mix=[a for a in self.svg_attributes if a not in lower]
self.svg_attributes = lower
self.svg_attr_map = dict([(a.lower(),a) for a in mix])
lower=[attr.lower() for attr in self.svg_elements]
mix=[a for a in self.svg_elements if a not in lower]
self.svg_elements = lower
self.svg_elem_map = dict([(a.lower(),a) for a in mix])
acceptable_attributes = self.svg_attributes
tag = self.svg_elem_map.get(tag,tag)
keymap = self.svg_attr_map
elif not tag in self.acceptable_elements:
return
# declare xlink namespace, if needed
if self.mathmlOK or self.svgOK:
if filter(lambda (n,v): n.startswith('xlink:'),attrs):
if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs:
attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink'))
clean_attrs = []
for key, value in self.normalize_attrs(attrs):
if key in acceptable_attributes:
key=keymap.get(key,key)
# make sure the uri uses an acceptable uri scheme
if key == u'href':
value = _makeSafeAbsoluteURI(value)
clean_attrs.append((key,value))
elif key=='style':
clean_value = self.sanitize_style(value)
if clean_value:
clean_attrs.append((key,clean_value))
_BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs)
def unknown_endtag(self, tag):
if not tag in self.acceptable_elements:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack -= 1
if self.mathmlOK and tag in self.mathml_elements:
if tag == 'math' and self.mathmlOK:
self.mathmlOK -= 1
elif self.svgOK and tag in self.svg_elements:
tag = self.svg_elem_map.get(tag,tag)
if tag == 'svg' and self.svgOK:
self.svgOK -= 1
else:
return
_BaseHTMLProcessor.unknown_endtag(self, tag)
def handle_pi(self, text):
pass
def handle_decl(self, text):
pass
def handle_data(self, text):
if not self.unacceptablestack:
_BaseHTMLProcessor.handle_data(self, text)
def sanitize_style(self, style):
# disallow urls
style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)
# gauntlet
if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
return ''
# This replaced a regexp that used re.match and was prone to pathological back-tracking.
if re.sub("\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip():
return ''
clean = []
for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
if not value:
continue
if prop.lower() in self.acceptable_css_properties:
clean.append(prop + ': ' + value + ';')
elif prop.split('-')[0].lower() in ['background','border','margin','padding']:
for keyword in value.split():
if not keyword in self.acceptable_css_keywords and \
not self.valid_css_values.match(keyword):
break
else:
clean.append(prop + ': ' + value + ';')
elif self.svgOK and prop.lower() in self.acceptable_svg_properties:
clean.append(prop + ': ' + value + ';')
return ' '.join(clean)
def parse_comment(self, i, report=1):
ret = _BaseHTMLProcessor.parse_comment(self, i, report)
if ret >= 0:
return ret
# if ret == -1, this may be a malicious attempt to circumvent
# sanitization, or a page-destroying unclosed comment
match = re.compile(r'--[^>]*>').search(self.rawdata, i+4)
if match:
return match.end()
# unclosed comment; deliberately fail to handle_data()
return len(self.rawdata)
def _sanitizeHTML(htmlSource, encoding, _type):
if not _SGML_AVAILABLE:
return htmlSource
p = _HTMLSanitizer(encoding, _type)
htmlSource = htmlSource.replace('<![CDATA[', '&lt;![CDATA[')
p.feed(htmlSource)
data = p.output()
if TIDY_MARKUP:
# loop through list of preferred Tidy interfaces looking for one that's installed,
# then set up a common _tidy function to wrap the interface-specific API.
_tidy = None
for tidy_interface in PREFERRED_TIDY_INTERFACES:
try:
if tidy_interface == "uTidy":
from tidy import parseString as _utidy
def _tidy(data, **kwargs):
return str(_utidy(data, **kwargs))
break
elif tidy_interface == "mxTidy":
from mx.Tidy import Tidy as _mxtidy
def _tidy(data, **kwargs):
nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
return data
break
except:
pass
if _tidy:
utf8 = isinstance(data, unicode)
if utf8:
data = data.encode('utf-8')
data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
if utf8:
data = unicode(data, 'utf-8')
if data.count('<body'):
data = data.split('<body', 1)[1]
if data.count('>'):
data = data.split('>', 1)[1]
if data.count('</body'):
data = data.split('</body', 1)[0]
data = data.strip().replace('\r\n', '\n')
return data
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
def http_error_default(self, req, fp, code, msg, headers):
# The default implementation just raises HTTPError.
# Forget that.
fp.status = code
return fp
def http_error_301(self, req, fp, code, msg, hdrs):
result = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp,
code, msg, hdrs)
result.status = code
result.newurl = result.geturl()
return result
# The default implementations in urllib2.HTTPRedirectHandler
# are identical, so hardcoding a http_error_301 call above
# won't affect anything
http_error_300 = http_error_301
http_error_302 = http_error_301
http_error_303 = http_error_301
http_error_307 = http_error_301
def http_error_401(self, req, fp, code, msg, headers):
# Check if
# - the server requires digest auth, AND
# - we tried (unsuccessfully) with basic auth.
# If both conditions hold, parse authentication information
# out of the Authorization header we sent the first time
# (for the username and password) and the WWW-Authenticate
# header the server sent back (for the realm) and retry
# the request with the appropriate digest auth headers instead.
# This evil genius hack has been brought to you by Aaron Swartz.
host = urlparse.urlparse(req.get_full_url())[1]
if base64 is None or 'Authorization' not in req.headers \
or 'WWW-Authenticate' not in headers:
return self.http_error_default(req, fp, code, msg, headers)
auth = _base64decode(req.headers['Authorization'].split(' ')[1])
user, passw = auth.split(':')
realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
self.add_password(realm, host, user, passw)
retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
self.reset_retry_count()
return retry
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers):
"""URL, filename, or string --> stream
This function lets you define parsers that take any input source
(URL, pathname to local or network file, or actual data as a string)
and deal with it in a uniform manner. Returned object is guaranteed
to have all the basic stdio read methods (read, readline, readlines).
Just .close() the object when you're done with it.
If the etag argument is supplied, it will be used as the value of an
If-None-Match request header.
If the modified argument is supplied, it can be a tuple of 9 integers
(as returned by gmtime() in the standard Python time module) or a date
string in any format supported by feedparser. Regardless, it MUST
be in GMT (Greenwich Mean Time). It will be reformatted into an
RFC 1123-compliant date and used as the value of an If-Modified-Since
request header.
If the agent argument is supplied, it will be used as the value of a
User-Agent request header.
If the referrer argument is supplied, it will be used as the value of a
Referer[sic] request header.
If handlers is supplied, it is a list of handlers used to build a
urllib2 opener.
If request_headers is supplied, it is a dictionary of HTTP request headers
that will override the values generated by FeedParser.
"""
if hasattr(url_file_stream_or_string, 'read'):
return url_file_stream_or_string
if isinstance(url_file_stream_or_string, basestring) \
and urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp', 'file', 'feed'):
# Deal with the feed URI scheme
if url_file_stream_or_string.startswith('feed:http'):
url_file_stream_or_string = url_file_stream_or_string[5:]
elif url_file_stream_or_string.startswith('feed:'):
url_file_stream_or_string = 'http:' + url_file_stream_or_string[5:]
if not agent:
agent = USER_AGENT
# test for inline user:password for basic auth
auth = None
if base64:
urltype, rest = urllib.splittype(url_file_stream_or_string)
realhost, rest = urllib.splithost(rest)
if realhost:
user_passwd, realhost = urllib.splituser(realhost)
if user_passwd:
url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
auth = base64.standard_b64encode(user_passwd).strip()
# iri support
if isinstance(url_file_stream_or_string, unicode):
url_file_stream_or_string = _convert_to_idn(url_file_stream_or_string)
# try to open with urllib2 (to use optional headers)
request = _build_urllib2_request(url_file_stream_or_string, agent, etag, modified, referrer, auth, request_headers)
opener = urllib2.build_opener(*tuple(handlers + [_FeedURLHandler()]))
opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
try:
return opener.open(request)
finally:
opener.close() # JohnD
# try to open with native open function (if url_file_stream_or_string is a filename)
try:
return open(url_file_stream_or_string, 'rb')
except (IOError, UnicodeEncodeError, TypeError):
# if url_file_stream_or_string is a unicode object that
# cannot be converted to the encoding returned by
# sys.getfilesystemencoding(), a UnicodeEncodeError
# will be thrown
# If url_file_stream_or_string is a string that contains NULL
# (such as an XML document encoded in UTF-32), TypeError will
# be thrown.
pass
# treat url_file_stream_or_string as string
if isinstance(url_file_stream_or_string, unicode):
return _StringIO(url_file_stream_or_string.encode('utf-8'))
return _StringIO(url_file_stream_or_string)
def _convert_to_idn(url):
"""Convert a URL to IDN notation"""
# this function should only be called with a unicode string
# strategy: if the host cannot be encoded in ascii, then
# it'll be necessary to encode it in idn form
parts = list(urlparse.urlsplit(url))
try:
parts[1].encode('ascii')
except UnicodeEncodeError:
# the url needs to be converted to idn notation
host = parts[1].rsplit(':', 1)
newhost = []
port = u''
if len(host) == 2:
port = host.pop()
for h in host[0].split('.'):
newhost.append(h.encode('idna').decode('utf-8'))
parts[1] = '.'.join(newhost)
if port:
parts[1] += ':' + port
return urlparse.urlunsplit(parts)
else:
return url
def _build_urllib2_request(url, agent, etag, modified, referrer, auth, request_headers):
request = urllib2.Request(url)
request.add_header('User-Agent', agent)
if etag:
request.add_header('If-None-Match', etag)
if isinstance(modified, basestring):
modified = _parse_date(modified)
elif isinstance(modified, datetime.datetime):
modified = modified.utctimetuple()
if modified:
# format into an RFC 1123-compliant timestamp. We can't use
# time.strftime() since the %a and %b directives can be affected
# by the current locale, but RFC 2616 states that dates must be
# in English.
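# For example, the header produced below looks like (illustrative value):
#   If-Modified-Since: Sun, 06 Nov 1994 08:49:37 GMT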
short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
if referrer:
request.add_header('Referer', referrer)
if gzip and zlib:
request.add_header('Accept-encoding', 'gzip, deflate')
elif gzip:
request.add_header('Accept-encoding', 'gzip')
elif zlib:
request.add_header('Accept-encoding', 'deflate')
else:
request.add_header('Accept-encoding', '')
if auth:
request.add_header('Authorization', 'Basic %s' % auth)
if ACCEPT_HEADER:
request.add_header('Accept', ACCEPT_HEADER)
# use this for whatever -- cookies, special headers, etc
# [('Cookie','Something'),('x-special-header','Another Value')]
for header_name, header_value in request_headers.items():
request.add_header(header_name, header_value)
request.add_header('A-IM', 'feed') # RFC 3229 support
return request
_date_handlers = []
def registerDateHandler(func):
'''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
_date_handlers.insert(0, func)
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance, is 030104 the date
# 2003-01-04 or 0301-04-01?), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO',
'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
'-YY-?MM', '-OOO', '-YY',
'--MM-?DD', '--MM',
'---DD',
'CC', '']
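# For illustration, these templates accept forms such as (hypothetical inputs):
# '2003-01-04', '20030104', '2003-004' (ordinal day), '2003-01', '--01-04'.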
_iso8601_re = [
tmpl.replace(
'YYYY', r'(?P<year>\d{4})').replace(
'YY', r'(?P<year>\d\d)').replace(
'MM', r'(?P<month>[01]\d)').replace(
'DD', r'(?P<day>[0123]\d)').replace(
'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
'CC', r'(?P<century>\d\d$)')
+ r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
+ r'(:(?P<second>\d{2}))?'
+ r'(\.(?P<fracsecond>\d+))?'
+ r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
for tmpl in _iso8601_tmpl]
try:
del tmpl
except NameError:
pass
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
try:
del regex
except NameError:
pass
def _parse_date_iso8601(dateString):
'''Parse a variety of ISO-8601-compatible formats like 20040105'''
m = None
for _iso8601_match in _iso8601_matches:
m = _iso8601_match(dateString)
if m:
break
if not m:
return
if m.span() == (0, 0):
return
params = m.groupdict()
ordinal = params.get('ordinal', 0)
if ordinal:
ordinal = int(ordinal)
else:
ordinal = 0
year = params.get('year', '--')
if not year or year == '--':
year = time.gmtime()[0]
elif len(year) == 2:
# ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
year = 100 * int(time.gmtime()[0] / 100) + int(year)
else:
year = int(year)
month = params.get('month', '-')
if not month or month == '-':
# ordinals are NOT normalized by mktime, we simulate them
# by setting month=1, day=ordinal
if ordinal:
month = 1
else:
month = time.gmtime()[1]
month = int(month)
day = params.get('day', 0)
if not day:
# see above
if ordinal:
day = ordinal
elif params.get('century', 0) or \
params.get('year', 0) or params.get('month', 0):
day = 1
else:
day = time.gmtime()[2]
else:
day = int(day)
# special case of the century - is the first year of the 21st century
# 2000 or 2001? The debate goes on...
if 'century' in params:
year = (int(params['century']) - 1) * 100 + 1
# in ISO 8601 most fields are optional
for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
if not params.get(field, None):
params[field] = 0
hour = int(params.get('hour', 0))
minute = int(params.get('minute', 0))
second = int(float(params.get('second', 0)))
# weekday is normalized by mktime(), we can ignore it
weekday = 0
daylight_savings_flag = -1
tm = [year, month, day, hour, minute, second, weekday,
ordinal, daylight_savings_flag]
# ISO 8601 time zone adjustments
tz = params.get('tz')
if tz and tz != 'Z':
if tz[0] == '-':
tm[3] += int(params.get('tzhour', 0))
tm[4] += int(params.get('tzmin', 0))
elif tz[0] == '+':
tm[3] -= int(params.get('tzhour', 0))
tm[4] -= int(params.get('tzmin', 0))
else:
return None
# Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
# which is guaranteed to normalize d/m/y/h/m/s.
# Many implementations have bugs, but we'll pretend they don't.
return time.localtime(time.mktime(tuple(tm)))
registerDateHandler(_parse_date_iso8601)
# 8-bit date handling routines written by ytrewq1.
_korean_year = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day = u'\uc77c' # c0cf in euc-kr
_korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr
_korean_onblog_date_re = \
re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
(_korean_year, _korean_month, _korean_day))
_korean_nate_date_re = \
re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
(_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
'''Parse a string according to the OnBlog 8-bit date format'''
m = _korean_onblog_date_re.match(dateString)
if not m:
return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
'zonediff': '+09:00'}
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
'''Parse a string according to the Nate 8-bit date format'''
m = _korean_nate_date_re.match(dateString)
if not m:
return
hour = int(m.group(5))
ampm = m.group(4)
if (ampm == _korean_pm):
hour += 12
hour = str(hour)
if len(hour) == 1:
hour = '0' + hour
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': hour, 'minute': m.group(6), 'second': m.group(7),\
'zonediff': '+09:00'}
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
# Unicode strings for Greek date strings
_greek_months = \
{ \
u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7
u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7
u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7
u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7
u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7
u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7
u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7
u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7
u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7
u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7
u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7
u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7
u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7
u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7
u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7
u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7
u'\u0394\u03b5\u03ba': u'Dec', # c4e5ea in iso-8859-7
}
_greek_wdays = \
{ \
u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
}
_greek_date_format_re = \
re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
'''Parse a string according to a Greek 8-bit date format.'''
m = _greek_date_format_re.match(dateString)
if not m:
return
wday = _greek_wdays[m.group(1)]
month = _greek_months[m.group(3)]
rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
{'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
'zonediff': m.group(8)}
return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
_hungarian_months = \
{ \
u'janu\u00e1r': u'01', # e1 in iso-8859-2
u'febru\u00e1ri': u'02', # e1 in iso-8859-2
u'm\u00e1rcius': u'03', # e1 in iso-8859-2
u'\u00e1prilis': u'04', # e1 in iso-8859-2
u'm\u00e1ujus': u'05', # e1 in iso-8859-2
u'j\u00fanius': u'06', # fa in iso-8859-2
u'j\u00falius': u'07', # fa in iso-8859-2
u'augusztus': u'08',
u'szeptember': u'09',
u'okt\u00f3ber': u'10', # f3 in iso-8859-2
u'november': u'11',
u'december': u'12',
}
_hungarian_date_format_re = \
re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
'''Parse a string according to a Hungarian 8-bit date format.'''
m = _hungarian_date_format_re.match(dateString)
if not m or m.group(2) not in _hungarian_months:
return None
month = _hungarian_months[m.group(2)]
day = m.group(3)
if len(day) == 1:
day = '0' + day
hour = m.group(4)
if len(hour) == 1:
hour = '0' + hour
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
{'year': m.group(1), 'month': month, 'day': day,\
'hour': hour, 'minute': m.group(5),\
'zonediff': m.group(6)}
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license. Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
# Modified to also support MSSQL-style datetimes as defined at:
# http://msdn.microsoft.com/en-us/library/ms186724.aspx
# (which basically means allowing a space as a date/time/timezone separator)
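# For illustration, this accepts strings such as (hypothetical inputs):
#   '2003-12-31T10:14:55Z', '2003-12-31T10:14:55-08:00',
#   and the MSSQL-style '2003-12-31 10:14:55 -08:00'.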
def _parse_date_w3dtf(dateString):
def __extract_date(m):
year = int(m.group('year'))
if year < 100:
year = 100 * int(time.gmtime()[0] / 100) + int(year)
if year < 1000:
return 0, 0, 0
julian = m.group('julian')
if julian:
julian = int(julian)
month = julian / 30 + 1
day = julian % 30 + 1
jday = None
while jday != julian:
t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
jday = time.gmtime(t)[-2]
diff = abs(jday - julian)
if jday > julian:
if diff < day:
day = day - diff
else:
month = month - 1
day = 31
elif jday < julian:
if day + diff < 28:
day = day + diff
else:
month = month + 1
return year, month, day
month = m.group('month')
day = 1
if month is None:
month = 1
else:
month = int(month)
day = m.group('day')
if day:
day = int(day)
else:
day = 1
return year, month, day
def __extract_time(m):
if not m:
return 0, 0, 0
hours = m.group('hours')
if not hours:
return 0, 0, 0
hours = int(hours)
minutes = int(m.group('minutes'))
seconds = m.group('seconds')
if seconds:
seconds = int(seconds)
else:
seconds = 0
return hours, minutes, seconds
def __extract_tzd(m):
'''Return the Time Zone Designator as an offset in seconds from UTC.'''
if not m:
return 0
tzd = m.group('tzd')
if not tzd:
return 0
if tzd == 'Z':
return 0
hours = int(m.group('tzdhours'))
minutes = m.group('tzdminutes')
if minutes:
minutes = int(minutes)
else:
minutes = 0
offset = (hours*60 + minutes) * 60
if tzd[0] == '+':
return -offset
return offset
__date_re = ('(?P<year>\d\d\d\d)'
'(?:(?P<dsep>-|)'
'(?:(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?'
'|(?P<julian>\d\d\d)))?')
__tzd_re = ' ?(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)?'
__time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
'(?:(?P=tsep)(?P<seconds>\d\d)(?:[.,]\d+)?)?'
+ __tzd_re)
__datetime_re = '%s(?:[T ]%s)?' % (__date_re, __time_re)
__datetime_rx = re.compile(__datetime_re)
m = __datetime_rx.match(dateString)
if (m is None) or (m.group() != dateString):
return
gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
if gmt[0] == 0:
return
return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)
# Define the strings used by the RFC822 datetime parser
_rfc822_months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',
'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
_rfc822_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
# Only the first three letters of the month name matter
_rfc822_month = "(?P<month>%s)(?:[a-z]*,?)" % ('|'.join(_rfc822_months))
# The year may be 2 or 4 digits; capture the century if it exists
_rfc822_year = "(?P<year>(?:\d{2})?\d{2})"
_rfc822_day = "(?P<day> *\d{1,2})"
_rfc822_date = "%s %s %s" % (_rfc822_day, _rfc822_month, _rfc822_year)
_rfc822_hour = "(?P<hour>\d{2}):(?P<minute>\d{2})(?::(?P<second>\d{2}))?"
_rfc822_tz = "(?P<tz>ut|gmt(?:[+-]\d{2}:\d{2})?|[aecmp][sd]?t|[zamny]|[+-]\d{4})"
_rfc822_tznames = {
'ut': 0, 'gmt': 0, 'z': 0,
'adt': -3, 'ast': -4, 'at': -4,
'edt': -4, 'est': -5, 'et': -5,
'cdt': -5, 'cst': -6, 'ct': -6,
'mdt': -6, 'mst': -7, 'mt': -7,
'pdt': -7, 'pst': -8, 'pt': -8,
'a': -1, 'n': 1,
'm': -12, 'y': 12,
}
# The timezone may be prefixed by 'Etc/'
_rfc822_time = "%s (?:etc/)?%s" % (_rfc822_hour, _rfc822_tz)
_rfc822_dayname = "(?P<dayname>%s)" % ('|'.join(_rfc822_daynames))
_rfc822_match = re.compile(
"(?:%s, )?%s(?: %s)?" % (_rfc822_dayname, _rfc822_date, _rfc822_time)
).match
def _parse_date_rfc822(dt):
"""Parse RFC 822 dates and times, with one minor
difference: years may be 4DIGIT or 2DIGIT.
http://tools.ietf.org/html/rfc822#section-5"""
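# e.g. (hypothetical inputs) 'Thu, 01 Jan 2004 19:48:21 GMT' and
# '01 Jan 04 19:48 EST' both yield a UTC 9-tuple.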
try:
m = _rfc822_match(dt.lower()).groupdict(0)
except AttributeError:
return None
# Calculate a date and timestamp
for k in ('year', 'day', 'hour', 'minute', 'second'):
m[k] = int(m[k])
m['month'] = _rfc822_months.index(m['month']) + 1
# If the year is 2 digits, assume everything in the 90's is the 1990's
if m['year'] < 100:
m['year'] += (1900, 2000)[m['year'] < 90]
stamp = datetime.datetime(*[m[i] for i in
('year', 'month', 'day', 'hour', 'minute', 'second')])
# Use the timezone information to calculate the difference between
# the given date and timestamp and Universal Coordinated Time
tzhour = 0
tzmin = 0
if m['tz'] and m['tz'].startswith('gmt'):
# Handle GMT and GMT+hh:mm timezone syntax (the trailing
# timezone info will be handled by the next `if` block)
m['tz'] = ''.join(m['tz'][3:].split(':')) or 'gmt'
if not m['tz']:
pass
elif m['tz'].startswith('+'):
tzhour = int(m['tz'][1:3])
tzmin = int(m['tz'][3:])
elif m['tz'].startswith('-'):
tzhour = int(m['tz'][1:3]) * -1
tzmin = int(m['tz'][3:]) * -1
else:
tzhour = _rfc822_tznames[m['tz']]
delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour)
# Return the date and timestamp in UTC
return (stamp - delta).utctimetuple()
registerDateHandler(_parse_date_rfc822)
def _parse_date_asctime(dt):
"""Parse asctime-style dates"""
dayname, month, day, remainder = dt.split(None, 3)
# Convert month and day into zero-padded integers
month = '%02i ' % (_rfc822_months.index(month.lower()) + 1)
day = '%02i ' % (int(day),)
dt = month + day + remainder
return time.strptime(dt, '%m %d %H:%M:%S %Y')[:-1] + (0, )
registerDateHandler(_parse_date_asctime)
def _parse_date_perforce(aDateString):
"""parse a date in yyyy/mm/dd hh:mm:ss TTT format"""
# Fri, 2006/09/15 08:19:53 EDT
_my_date_pattern = re.compile( \
r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})')
m = _my_date_pattern.search(aDateString)
if m is None:
return None
dow, year, month, day, hour, minute, second, tz = m.groups()
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
dateString = "%s, %s %s %s %s:%s:%s %s" % (dow, day, months[int(month) - 1], year, hour, minute, second, tz)
tm = rfc822.parsedate_tz(dateString)
if tm:
return time.gmtime(rfc822.mktime_tz(tm))
registerDateHandler(_parse_date_perforce)
def _parse_date(dateString):
'''Parses a variety of date formats into a 9-tuple in GMT'''
if not dateString:
return None
for handler in _date_handlers:
try:
date9tuple = handler(dateString)
except (KeyError, OverflowError, ValueError):
continue
if not date9tuple:
continue
if len(date9tuple) != 9:
continue
return date9tuple
return None
# Each marker represents some of the characters of the opening XML
# processing instruction ('<?xm') in the specified encoding.
EBCDIC_MARKER = _l2bytes([0x4C, 0x6F, 0xA7, 0x94])
UTF16BE_MARKER = _l2bytes([0x00, 0x3C, 0x00, 0x3F])
UTF16LE_MARKER = _l2bytes([0x3C, 0x00, 0x3F, 0x00])
UTF32BE_MARKER = _l2bytes([0x00, 0x00, 0x00, 0x3C])
UTF32LE_MARKER = _l2bytes([0x3C, 0x00, 0x00, 0x00])
ZERO_BYTES = _l2bytes([0x00, 0x00])
# Match the opening XML declaration.
# Example: <?xml version="1.0" encoding="utf-8"?>
RE_XML_DECLARATION = re.compile('^<\?xml[^>]*?>')
# Capture the value of the XML processing instruction's encoding attribute.
# Example: <?xml version="1.0" encoding="utf-8"?>
RE_XML_PI_ENCODING = re.compile(_s2bytes('^<\?.*encoding=[\'"](.*?)[\'"].*\?>'))
def convert_to_utf8(http_headers, data):
'''Detect and convert the character encoding to UTF-8.
http_headers is a dictionary
data is a raw string (not Unicode)'''
# This is so much trickier than it sounds, it's not even funny.
# According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
# is application/xml, application/*+xml,
# application/xml-external-parsed-entity, or application/xml-dtd,
# the encoding given in the charset parameter of the HTTP Content-Type
# takes precedence over the encoding given in the XML prefix within the
# document, and defaults to 'utf-8' if neither are specified. But, if
# the HTTP Content-Type is text/xml, text/*+xml, or
# text/xml-external-parsed-entity, the encoding given in the XML prefix
# within the document is ALWAYS IGNORED and only the encoding given in
# the charset parameter of the HTTP Content-Type header should be
# respected, and it defaults to 'us-ascii' if not specified.
# Furthermore, discussion on the atom-syntax mailing list with the
# author of RFC 3023 leads me to the conclusion that any document
# served with a Content-Type of text/* and no charset parameter
# must be treated as us-ascii. (We now do this.) And also that it
# must always be flagged as non-well-formed. (We now do this too.)
# If Content-Type is unspecified (input was local file or non-HTTP source)
# or unrecognized (server just got it totally wrong), then go by the
# encoding given in the XML prefix of the document and default to
# 'iso-8859-1' as per the HTTP specification (RFC 2616).
# Then, assuming we didn't find a character encoding in the HTTP headers
# (and the HTTP Content-type allowed us to look in the body), we need
# to sniff the first few bytes of the XML data and try to determine
# whether the encoding is ASCII-compatible. Section F of the XML
# specification shows the way here:
# http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
# If the sniffed encoding is not ASCII-compatible, we need to make it
# ASCII compatible so that we can sniff further into the XML declaration
# to find the encoding attribute, which will tell us the true encoding.
# Of course, none of this guarantees that we will be able to parse the
# feed in the declared character encoding (assuming it was declared
# correctly, which many are not). iconv_codec can help a lot;
# you should definitely install it if you can.
# http://cjkpython.i18n.org/
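# A rough summary of the precedence rules above (informal, not normative):
#   application/*xml types: HTTP charset, else XML declaration, else utf-8
#   text/*xml types:        HTTP charset, else us-ascii (XML prefix ignored)
#   missing Content-Type:   XML declaration, else iso-8859-1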
bom_encoding = u''
xml_encoding = u''
rfc3023_encoding = u''
# Look at the first few bytes of the document to guess what
# its encoding may be. We only need to decode enough of the
# document that we can use an ASCII-compatible regular
# expression to search for an XML encoding declaration.
# The heuristic follows the XML specification, section F:
# http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
# Check for BOMs first.
if data[:4] == codecs.BOM_UTF32_BE:
bom_encoding = u'utf-32be'
data = data[4:]
elif data[:4] == codecs.BOM_UTF32_LE:
bom_encoding = u'utf-32le'
data = data[4:]
elif data[:2] == codecs.BOM_UTF16_BE and data[2:4] != ZERO_BYTES:
bom_encoding = u'utf-16be'
data = data[2:]
elif data[:2] == codecs.BOM_UTF16_LE and data[2:4] != ZERO_BYTES:
bom_encoding = u'utf-16le'
data = data[2:]
elif data[:3] == codecs.BOM_UTF8:
bom_encoding = u'utf-8'
data = data[3:]
# Check for the characters '<?xm' in several encodings.
elif data[:4] == EBCDIC_MARKER:
bom_encoding = u'cp037'
elif data[:4] == UTF16BE_MARKER:
bom_encoding = u'utf-16be'
elif data[:4] == UTF16LE_MARKER:
bom_encoding = u'utf-16le'
elif data[:4] == UTF32BE_MARKER:
bom_encoding = u'utf-32be'
elif data[:4] == UTF32LE_MARKER:
bom_encoding = u'utf-32le'
tempdata = data
try:
if bom_encoding:
tempdata = data.decode(bom_encoding).encode('utf-8')
except (UnicodeDecodeError, LookupError):
# feedparser recognizes UTF-32 encodings that aren't
# available in Python 2.4 and 2.5, so it's possible to
# encounter a LookupError during decoding.
xml_encoding_match = None
else:
xml_encoding_match = RE_XML_PI_ENCODING.match(tempdata)
if xml_encoding_match:
xml_encoding = xml_encoding_match.groups()[0].decode('utf-8').lower()
# Normalize the xml_encoding if necessary.
if bom_encoding and (xml_encoding in (
u'u16', u'utf-16', u'utf16', u'utf_16',
u'u32', u'utf-32', u'utf32', u'utf_32',
u'iso-10646-ucs-2', u'iso-10646-ucs-4',
u'csucs4', u'csunicode', u'ucs-2', u'ucs-4'
)):
xml_encoding = bom_encoding
# Find the HTTP Content-Type and, hopefully, a character
# encoding provided by the server. The Content-Type is used
# to choose the "correct" encoding among the BOM encoding,
# XML declaration encoding, and HTTP encoding, following the
# heuristic defined in RFC 3023.
http_content_type = http_headers.get('content-type') or ''
http_content_type, params = cgi.parse_header(http_content_type)
http_encoding = params.get('charset', '').replace("'", "")
if not isinstance(http_encoding, unicode):
http_encoding = http_encoding.decode('utf-8', 'ignore')
acceptable_content_type = 0
application_content_types = (u'application/xml', u'application/xml-dtd',
u'application/xml-external-parsed-entity')
text_content_types = (u'text/xml', u'text/xml-external-parsed-entity')
if (http_content_type in application_content_types) or \
(http_content_type.startswith(u'application/') and
http_content_type.endswith(u'+xml')):
acceptable_content_type = 1
rfc3023_encoding = http_encoding or xml_encoding or u'utf-8'
elif (http_content_type in text_content_types) or \
(http_content_type.startswith(u'text/') and
http_content_type.endswith(u'+xml')):
acceptable_content_type = 1
rfc3023_encoding = http_encoding or u'us-ascii'
elif http_content_type.startswith(u'text/'):
rfc3023_encoding = http_encoding or u'us-ascii'
elif http_headers and 'content-type' not in http_headers:
rfc3023_encoding = xml_encoding or u'iso-8859-1'
else:
rfc3023_encoding = xml_encoding or u'utf-8'
# gb18030 is a superset of gb2312, so always replace gb2312
# with gb18030 for greater compatibility.
if rfc3023_encoding.lower() == u'gb2312':
rfc3023_encoding = u'gb18030'
if xml_encoding.lower() == u'gb2312':
xml_encoding = u'gb18030'
# there are four encodings to keep track of:
# - http_encoding is the encoding declared in the Content-Type HTTP header
# - xml_encoding is the encoding declared in the <?xml declaration
# - bom_encoding is the encoding sniffed from the first 4 bytes of the XML data
# - rfc3023_encoding is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
error = None
if http_headers and (not acceptable_content_type):
if 'content-type' in http_headers:
msg = '%s is not an XML media type' % http_headers['content-type']
else:
msg = 'no Content-type specified'
error = NonXMLContentType(msg)
# determine character encoding
known_encoding = 0
chardet_encoding = None
tried_encodings = []
if chardet:
chardet_encoding = unicode(chardet.detect(data)['encoding'] or '', 'ascii', 'ignore')
# try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
for proposed_encoding in (rfc3023_encoding, xml_encoding, bom_encoding,
chardet_encoding, u'utf-8', u'windows-1252', u'iso-8859-2'):
if not proposed_encoding:
continue
if proposed_encoding in tried_encodings:
continue
tried_encodings.append(proposed_encoding)
try:
data = data.decode(proposed_encoding)
except (UnicodeDecodeError, LookupError):
pass
else:
known_encoding = 1
# Update the encoding in the opening XML processing instruction.
new_declaration = '''<?xml version='1.0' encoding='utf-8'?>'''
if RE_XML_DECLARATION.search(data):
data = RE_XML_DECLARATION.sub(new_declaration, data)
else:
data = new_declaration + u'\n' + data
data = data.encode('utf-8')
break
# if still no luck, give up
if not known_encoding:
error = CharacterEncodingUnknown(
'document encoding unknown, I tried ' +
'%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' %
(rfc3023_encoding, xml_encoding))
rfc3023_encoding = u''
elif proposed_encoding != rfc3023_encoding:
error = CharacterEncodingOverride(
'document declared as %s, but parsed as %s' %
(rfc3023_encoding, proposed_encoding))
rfc3023_encoding = proposed_encoding
return data, rfc3023_encoding, error
# Match XML entity declarations.
# Example: <!ENTITY copyright "(C)">
RE_ENTITY_PATTERN = re.compile(_s2bytes(r'^\s*<!ENTITY([^>]*?)>'), re.MULTILINE)
# Match XML DOCTYPE declarations.
# Example: <!DOCTYPE feed [ ]>
RE_DOCTYPE_PATTERN = re.compile(_s2bytes(r'^\s*<!DOCTYPE([^>]*?)>'), re.MULTILINE)
# Match safe entity declarations.
# This will allow hexadecimal character references through,
# as well as text, but not arbitrary nested entities.
# Example: cubed "&#179;"
# Example: copyright "(C)"
# Forbidden: explode1 "&explode2;&explode2;"
RE_SAFE_ENTITY_PATTERN = re.compile(_s2bytes('\s+(\w+)\s+"(&#\w+;|[^&"]*)"'))
def replace_doctype(data):
'''Strips and replaces the DOCTYPE, returns (rss_version, stripped_data)
rss_version may be 'rss091n' or None
stripped_data is the same XML document with a replaced DOCTYPE
'''
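# For example (hypothetical input): a document whose DOCTYPE contains the
# string 'netscape' is reported as version u'rss091n'; any other DOCTYPE
# leaves the version as None.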
# Divide the document into two groups by finding the location
# of the first element that doesn't begin with '<?' or '<!'.
start = re.search(_s2bytes('<\w'), data)
start = start and start.start() or -1
head, data = data[:start+1], data[start+1:]
# Save and then remove all of the ENTITY declarations.
entity_results = RE_ENTITY_PATTERN.findall(head)
head = RE_ENTITY_PATTERN.sub(_s2bytes(''), head)
# Find the DOCTYPE declaration and check the feed type.
doctype_results = RE_DOCTYPE_PATTERN.findall(head)
doctype = doctype_results and doctype_results[0] or _s2bytes('')
if _s2bytes('netscape') in doctype.lower():
version = u'rss091n'
else:
version = None
# Re-insert the safe ENTITY declarations if a DOCTYPE was found.
replacement = _s2bytes('')
if len(doctype_results) == 1 and entity_results:
match_safe_entities = lambda e: RE_SAFE_ENTITY_PATTERN.match(e)
safe_entities = filter(match_safe_entities, entity_results)
if safe_entities:
replacement = _s2bytes('<!DOCTYPE feed [\n<!ENTITY') \
+ _s2bytes('>\n<!ENTITY ').join(safe_entities) \
+ _s2bytes('>\n]>')
data = RE_DOCTYPE_PATTERN.sub(replacement, head) + data
# Precompute the safe entities for the loose parser.
safe_entities = dict((k.decode('utf-8'), v.decode('utf-8'))
for k, v in RE_SAFE_ENTITY_PATTERN.findall(replacement))
return version, data, safe_entities
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=None, request_headers=None, response_headers=None):
'''Parse a feed from a URL, file, stream, or string.
request_headers, if given, is a dict from http header name to value to add
to the request; this overrides internally generated values.
'''
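# Illustrative usage (hypothetical URL; a sketch, not from the original docs):
#
#     d = parse('http://example.org/feed.xml')
#     print d.get('bozo'), d.get('version'), len(d['entries'])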
if handlers is None:
handlers = []
if request_headers is None:
request_headers = {}
if response_headers is None:
response_headers = {}
result = FeedParserDict()
result['feed'] = FeedParserDict()
result['entries'] = []
result['bozo'] = 0
if not isinstance(handlers, list):
handlers = [handlers]
try:
f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers)
data = f.read()
except Exception, e:
result['bozo'] = 1
result['bozo_exception'] = e
data = None
f = None
if hasattr(f, 'headers'):
result['headers'] = dict(f.headers)
# overwrite existing headers using response_headers
if 'headers' in result:
result['headers'].update(response_headers)
elif response_headers:
result['headers'] = copy.deepcopy(response_headers)
# lowercase all of the HTTP headers for comparisons per RFC 2616
if 'headers' in result:
http_headers = dict((k.lower(), v) for k, v in result['headers'].items())
else:
http_headers = {}
# if feed is gzip-compressed, decompress it
if f and data and http_headers:
if gzip and 'gzip' in http_headers.get('content-encoding', ''):
try:
data = gzip.GzipFile(fileobj=_StringIO(data)).read()
except (IOError, struct.error), e:
# IOError can occur if the gzip header is bad.
# struct.error can occur if the data is damaged.
result['bozo'] = 1
result['bozo_exception'] = e
if isinstance(e, struct.error):
# A gzip header was found but the data is corrupt.
# Ideally, we should re-request the feed without the
# 'Accept-encoding: gzip' header, but we don't.
data = None
elif zlib and 'deflate' in http_headers.get('content-encoding', ''):
try:
data = zlib.decompress(data)
except zlib.error, e:
try:
# The data may have no headers and no checksum.
data = zlib.decompress(data, -15)
except zlib.error, e:
result['bozo'] = 1
result['bozo_exception'] = e
# save HTTP headers
if http_headers:
if 'etag' in http_headers:
etag = http_headers.get('etag', u'')
if not isinstance(etag, unicode):
etag = etag.decode('utf-8', 'ignore')
if etag:
result['etag'] = etag
if 'last-modified' in http_headers:
modified = http_headers.get('last-modified', u'')
if modified:
result['modified'] = modified
result['modified_parsed'] = _parse_date(modified)
if hasattr(f, 'url'):
if not isinstance(f.url, unicode):
result['href'] = f.url.decode('utf-8', 'ignore')
else:
result['href'] = f.url
result['status'] = 200
if hasattr(f, 'status'):
result['status'] = f.status
if hasattr(f, 'close'):
f.close()
if data is None:
return result
# Stop processing if the server sent HTTP 304 Not Modified.
if getattr(f, 'code', 0) == 304:
result['version'] = u''
result['debug_message'] = 'The feed has not changed since you last checked, ' + \
'so the server sent no data. This is a feature, not a bug!'
return result
data, result['encoding'], error = convert_to_utf8(http_headers, data)
use_strict_parser = result['encoding'] and True or False
if error is not None:
result['bozo'] = 1
result['bozo_exception'] = error
result['version'], data, entities = replace_doctype(data)
# Ensure that baseuri is an absolute URI using an acceptable URI scheme.
contentloc = http_headers.get('content-location', u'')
href = result.get('href', u'')
baseuri = _makeSafeAbsoluteURI(href, contentloc) or _makeSafeAbsoluteURI(contentloc) or href
baselang = http_headers.get('content-language', None)
if not isinstance(baselang, unicode) and baselang is not None:
baselang = baselang.decode('utf-8', 'ignore')
if not _XML_AVAILABLE:
use_strict_parser = 0
if use_strict_parser:
# initialize the SAX parser
feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
try:
# disable downloading external doctype references, if possible
saxparser.setFeature(xml.sax.handler.feature_external_ges, 0)
except xml.sax.SAXNotSupportedException:
pass
saxparser.setContentHandler(feedparser)
saxparser.setErrorHandler(feedparser)
source = xml.sax.xmlreader.InputSource()
source.setByteStream(_StringIO(data))
try:
saxparser.parse(source)
except xml.sax.SAXException, e:
result['bozo'] = 1
result['bozo_exception'] = feedparser.exc or e
use_strict_parser = 0
if not use_strict_parser and _SGML_AVAILABLE:
feedparser = _LooseFeedParser(baseuri, baselang, 'utf-8', entities)
feedparser.feed(data.decode('utf-8', 'replace'))
result['feed'] = feedparser.feeddata
result['entries'] = feedparser.entries
result['version'] = result['version'] or feedparser.version
result['namespaces'] = feedparser.namespacesInUse
return result
|
gpl-2.0
|
eadgarchen/tensorflow
|
tensorflow/python/kernel_tests/numerics_test.py
|
45
|
5057
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.numerics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import numerics
from tensorflow.python.platform import test
class VerifyTensorAllFiniteTest(test.TestCase):
def testVerifyTensorAllFiniteSucceeds(self):
x_shape = [5, 4]
x = np.random.random_sample(x_shape).astype(np.float32)
with self.test_session(use_gpu=True):
t = constant_op.constant(x, shape=x_shape, dtype=dtypes.float32)
t_verified = numerics.verify_tensor_all_finite(t,
"Input is not a number.")
self.assertAllClose(x, t_verified.eval())
def testVerifyTensorAllFiniteFails(self):
x_shape = [5, 4]
x = np.random.random_sample(x_shape).astype(np.float32)
my_msg = "Input is not a number."
# Test NaN.
x[0] = np.nan
with self.test_session(use_gpu=True):
with self.assertRaisesOpError(my_msg):
t = constant_op.constant(x, shape=x_shape, dtype=dtypes.float32)
t_verified = numerics.verify_tensor_all_finite(t, my_msg)
t_verified.eval()
# Test Inf.
x[0] = np.inf
with self.test_session(use_gpu=True):
with self.assertRaisesOpError(my_msg):
t = constant_op.constant(x, shape=x_shape, dtype=dtypes.float32)
t_verified = numerics.verify_tensor_all_finite(t, my_msg)
t_verified.eval()
class NumericsTest(test.TestCase):
def testInf(self):
with self.test_session(graph=ops.Graph()):
t1 = constant_op.constant(1.0)
t2 = constant_op.constant(0.0)
a = math_ops.div(t1, t2)
check = numerics.add_check_numerics_ops()
a = control_flow_ops.with_dependencies([check], a)
with self.assertRaisesOpError("Inf"):
a.eval()
def testNaN(self):
with self.test_session(graph=ops.Graph()):
t1 = constant_op.constant(0.0)
t2 = constant_op.constant(0.0)
a = math_ops.div(t1, t2)
check = numerics.add_check_numerics_ops()
a = control_flow_ops.with_dependencies([check], a)
with self.assertRaisesOpError("NaN"):
a.eval()
def testBoth(self):
with self.test_session(graph=ops.Graph()):
t1 = constant_op.constant([1.0, 0.0])
t2 = constant_op.constant([0.0, 0.0])
a = math_ops.div(t1, t2)
check = numerics.add_check_numerics_ops()
a = control_flow_ops.with_dependencies([check], a)
with self.assertRaisesOpError("Inf and NaN"):
a.eval()
def testPassThrough(self):
with self.test_session(graph=ops.Graph()):
t1 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3])
checked = array_ops.check_numerics(t1, message="pass through test")
value = checked.eval()
self.assertAllEqual(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), value)
self.assertEqual([2, 3], checked.get_shape())
def testControlFlowCond(self):
predicate = array_ops.placeholder(dtypes.bool, shape=[])
_ = control_flow_ops.cond(predicate,
lambda: constant_op.constant([37.]),
lambda: constant_op.constant([42.]))
with self.assertRaisesRegexp(
ValueError,
r"`tf\.add_check_numerics_ops\(\) is not compatible with "
r"TensorFlow control flow operations such as `tf\.cond\(\)` "
r"or `tf.while_loop\(\)`\."):
numerics.add_check_numerics_ops()
def testControlFlowWhile(self):
predicate = array_ops.placeholder(dtypes.bool, shape=[])
_ = control_flow_ops.while_loop(lambda _: predicate,
lambda _: constant_op.constant([37.]),
[constant_op.constant([42.])])
with self.assertRaisesRegexp(
ValueError,
r"`tf\.add_check_numerics_ops\(\) is not compatible with "
r"TensorFlow control flow operations such as `tf\.cond\(\)` "
r"or `tf.while_loop\(\)`\."):
numerics.add_check_numerics_ops()
if __name__ == "__main__":
test.main()
|
apache-2.0
|
alexmilesyounger/ds_basics
|
src/numpy_utils.py
|
2
|
3188
|
# coding: utf-8
# numpy_utils for Intro to Data Science with Python
# Author: Kat Chuang
# Created: Nov 2014
# --------------------------------------
import numpy
## Stage 2 begin
fieldNames = ['', 'id', 'priceLabel', 'name','brandId', 'brandName', 'imageLink',
'desc', 'vendor', 'patterned', 'material']
dataTypes = [('myint', 'i'), ('myid', 'i'), ('price', 'f8'), ('name', 'a200'),
('brandId', '<i8'), ('brandName', 'a200'), ('imageUrl', '|S500'),
('description', '|S900'), ('vendor', '|S100'), ('pattern', '|S50'), ('material', '|S50'), ]
def load_data(filename):
my_csv = numpy.genfromtxt(filename, delimiter='\t', skip_header=1,
names=fieldNames, invalid_raise=False,
dtype=dataTypes)
return my_csv
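# Illustrative usage (hypothetical tab-separated filename):
#   records = load_data('products.tsv')
#   size(records)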
#2.a count
def size(my_csv):
print("Length (numpy): {}".format(my_csv.size))
#2.b sum
def calculate_numpy_sum(my_field):
field_in_float = [float(item) for item in my_field]
total = numpy.sum(field_in_float)
return total
#2.c mean
def find_numpy_average(my_field):
field_in_float = [float(item) for item in my_field]
total = calculate_numpy_sum(field_in_float)
size = len(my_field)
average = total / size
return average
#2.d max, min
def numpy_max(my_field_in_float):
return numpy.amax(my_field_in_float)
def numpy_min(my_field_in_float):
return numpy.amin(my_field_in_float)
## Stage 2 end
# --------------------------------------
## Stage 3 begin
from my_utils import filter_col_by_string, filter_col_by_float
## Stage 3 end
# --------------------------------------
## Stage 4 begin
from my_utils import write_to_file, write_brand_and_price_to_file
## Stage 4 end
# --------------------------------------
## Stage 5 begin
import matplotlib.pyplot as plt
plt.style.use('ggplot')
def plot_all_bars(prices_in_float, exported_figure_filename):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
prices = list(map(int, prices_in_float))
X = numpy.arange(len(prices))
width = 0.25
ax.bar(X+width, prices, width)
ax.set_xlim([0, 5055])
fig.savefig(exported_figure_filename)
def create_chart_for_embed(sample, title):
prices = sorted(map(int, sample))
x_axis_ticks = list( range(len(sample)) )
plt.plot(x_axis_ticks, prices, 'g', label='price points', linewidth=2)
def export_chart(sample, title):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
prices = sorted(map(int, sample))
x_axis_ticks = list( range(len(sample)) )
ax.plot(x_axis_ticks, prices, 'g', label='price points', linewidth=2)
ax.set_title(title)
ax.set_xlabel(title)
ax.set_ylabel('Number of Ties')
if len(prices) > 20:
ax.set_xlim([0, round(len(prices), -1)])
else:
ax.set_xlim([0, len(prices)])
fig.savefig('_charts/' + title + '.png')
def prices_of_list(sampleData):
temp_list = []
for row in sampleData[1:]:
priceCol = float(row[2])
temp_list.append(priceCol)
return temp_list
## Stage 5 end
# --------------------------------------
## Stage 6 begin
## Stage 6 end
# --------------------------------------
|
mit
|
sameetb-cuelogic/edx-platform-test
|
common/djangoapps/student/management/commands/add_to_group.py
|
182
|
1968
|
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User, Group
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--list',
action='store_true',
dest='list',
default=False,
help='List available groups'),
make_option('--create',
action='store_true',
dest='create',
default=False,
help='Create the group if it does not exist'),
make_option('--remove',
action='store_true',
dest='remove',
default=False,
help='Remove the user from the group instead of adding it'),
)
args = '<user|email> <group>'
help = 'Add a user to a group'
def print_groups(self):
print 'Groups available:'
for group in Group.objects.all().distinct():
print ' ', group.name
def handle(self, *args, **options):
if options['list']:
self.print_groups()
return
if len(args) != 2:
raise CommandError('Usage is add_to_group {0}'.format(self.args))
name_or_email, group_name = args
if '@' in name_or_email:
user = User.objects.get(email=name_or_email)
else:
user = User.objects.get(username=name_or_email)
try:
group = Group.objects.get(name=group_name)
except Group.DoesNotExist:
if options['create']:
group = Group(name=group_name)
group.save()
else:
raise CommandError('Group {} does not exist'.format(group_name))
if options['remove']:
user.groups.remove(group)
else:
user.groups.add(group)
print 'Success!'
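# Example invocations (hypothetical user and group names):
# ./manage.py add_to_group --list
# ./manage.py add_to_group --create [email protected] beta_testers
# ./manage.py add_to_group --remove jdoe beta_testers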
|
agpl-3.0
|
flavour/tldrmp
|
modules/tests/inv/create_item.py
|
25
|
2382
|
# -*- coding: utf-8 -*-
""" Sahana Eden Automated Tests - INV005 Create Item
@copyright: 2011-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from tests.web2unittest import SeleniumUnitTest
class CreateItem(SeleniumUnitTest):
def test_inv005_create_item(self):
"""
@case: INV005
@description: Create an Item
@TestDoc: https://docs.google.com/spreadsheet/ccc?key=0AmB3hMcgB-3idG1XNGhhRG9QWF81dUlKLXpJaFlCMFE
@Test Wiki: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Testing
"""
print "\n"
# Login, if not-already done so
self.login(account="admin", nexturl="asset/item/create")
self.browser.find_element_by_id("supply_item_um").clear()
self.create("supply_item",
[( "name",
"Soup" ),
( "um",
"litre" ),
( "item_category_id",
"Standard > Food"),
( "model",
"Tomato" ),
( "year",
"2012" ),
( "comments",
"This is a Test Item" )]
)
|
mit
|
XiaodunServerGroup/ddyedx
|
cms/envs/common.py
|
1
|
18114
|
# -*- coding: utf-8 -*-
"""
This is the common settings file, intended to set sane defaults. If you have a
piece of configuration that's dependent on a set of feature flags being set,
then create a function that returns the calculated value based on the value of
FEATURES[...]. Modules that extend this one can change the feature
configuration in an environment specific config file and re-calculate those
values.
We should make a method that calls all these config methods so that you just
make one call at the end of your site-specific dev file to reset all the
dependent variables (like INSTALLED_APPS) for you.
Longer TODO:
1. Right now our treatment of static content in general and in particular
course-specific static content is haphazard.
2. We should have a more disciplined approach to feature flagging, even if it
just means that we stick them in a dict called FEATURES.
3. We need to handle configuration for multiple courses. This could be as
multiple sites, but we do need a way to map their data assets.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0611, W0614
import sys
import json
import lms.envs.common
from lms.envs.common import (
USE_TZ, TECH_SUPPORT_EMAIL, PLATFORM_NAME, BUGS_EMAIL, DOC_STORE_CONFIG, ALL_LANGUAGES
)
from path import path
from lms.lib.xblock.mixin import LmsBlockMixin
from cms.lib.xblock.mixin import CmsBlockMixin
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.x_module import XModuleMixin, prefer_xmodules
from dealer.git import git
############################ FEATURE CONFIGURATION #############################
FEATURES = {
'USE_DJANGO_PIPELINE': True,
'GITHUB_PUSH': False,
'ENABLE_DISCUSSION_SERVICE': False,
'AUTH_USE_CERTIFICATES': False,
# email address for Studio staff (e.g. to request course creation)
'STUDIO_REQUEST_EMAIL': '',
'STUDIO_NPS_SURVEY': True,
# Segment.io - must explicitly turn it on for production
'SEGMENT_IO': False,
# Enable URL that shows information about the status of various services
'ENABLE_SERVICE_STATUS': False,
# Don't autoplay videos for course authors
'AUTOPLAY_VIDEOS': False,
# If set to True, new Studio users won't be able to author courses unless
# edX has explicitly added them to the course creator group.
'ENABLE_CREATOR_GROUP': False,
# whether to use password policy enforcement or not
'ENFORCE_PASSWORD_POLICY': False,
# If set to True, Studio won't restrict the set of advanced components
# to just those pre-approved by edX
'ALLOW_ALL_ADVANCED_COMPONENTS': False,
# Turn off account locking if failed login attempts exceeds a limit
'ENABLE_MAX_FAILED_LOGIN_ATTEMPTS': False,
# Allow editing of short description in course settings in cms
'EDITABLE_SHORT_DESCRIPTION': True,
# Hide any Personally Identifiable Information from application logs
'SQUELCH_PII_IN_LOGS': False,
# Toggles embargo functionality
'EMBARGO': False,
# Turn on/off Microsites feature
'USE_MICROSITES': False,
}
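# Illustrative only, not part of upstream edX: the pattern described in the
# module docstring, deriving a dependent value from FEATURES so that
# environment-specific settings files can flip the flag and re-calculate it.
def _example_feature_dependent_apps():
"""Hypothetical helper: extra apps to install when service status is on."""
return ('service_status',) if FEATURES['ENABLE_SERVICE_STATUS'] else ()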
ENABLE_JASMINE = False
########### course fields #############
# COURSE_EXTEND_FIELDS = lms.envs.common.COURSE_EXTEND_FIELDS
############################# SET PATH INFORMATION #############################
PROJECT_ROOT = path(__file__).abspath().dirname().dirname() # /edx-platform/cms
REPO_ROOT = PROJECT_ROOT.dirname()
COMMON_ROOT = REPO_ROOT / "common"
LMS_ROOT = REPO_ROOT / "lms"
ENV_ROOT = REPO_ROOT.dirname() # virtualenv dir /edx-platform is in
GITHUB_REPO_ROOT = ENV_ROOT / "data"
sys.path.append(REPO_ROOT)
sys.path.append(PROJECT_ROOT / 'djangoapps')
sys.path.append(COMMON_ROOT / 'djangoapps')
sys.path.append(COMMON_ROOT / 'lib')
# For geolocation ip database
GEOIP_PATH = REPO_ROOT / "common/static/data/geoip/GeoIP.dat"
############################# WEB CONFIGURATION #############################
# This is where we stick our compiled template files.
from tempdir import mkdtemp_clean
MAKO_MODULE_DIR = mkdtemp_clean('mako')
MAKO_TEMPLATES = {}
MAKO_TEMPLATES['main'] = [
PROJECT_ROOT / 'templates',
COMMON_ROOT / 'templates',
COMMON_ROOT / 'djangoapps' / 'pipeline_mako' / 'templates',
COMMON_ROOT / 'djangoapps' / 'pipeline_js' / 'templates',
]
for namespace, template_dirs in lms.envs.common.MAKO_TEMPLATES.iteritems():
MAKO_TEMPLATES['lms.' + namespace] = template_dirs
TEMPLATE_DIRS = MAKO_TEMPLATES['main']
EDX_ROOT_URL = ''
LOGIN_REDIRECT_URL = EDX_ROOT_URL + '/signin'
LOGIN_URL = EDX_ROOT_URL + '/signin'
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
'django.core.context_processors.static',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.i18n',
'django.contrib.auth.context_processors.auth', # this is required for admin
'django.core.context_processors.csrf',
'dealer.contrib.django.staff.context_processor', # access git revision
'contentstore.context_processors.doc_url',
)
# use the ratelimit backend to prevent brute force attacks
AUTHENTICATION_BACKENDS = (
'ratelimitbackend.backends.RateLimitModelBackend',
)
LMS_BASE = None
#################### CAPA External Code Evaluation #############################
XQUEUE_INTERFACE = {
'url': 'http://localhost:8888',
'django_auth': {'username': 'local',
'password': 'local'},
'basic_auth': None,
}
################################# Middleware ###################################
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'staticfiles.finders.FileSystemFinder',
'staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'request_cache.middleware.RequestCache',
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'method_override.middleware.MethodOverrideMiddleware',
# Instead of AuthenticationMiddleware, we use a cache-backed version
'cache_toolbox.middleware.CacheBackedAuthenticationMiddleware',
'student.middleware.UserStandingMiddleware',
'contentserver.middleware.StaticContentServer',
'crum.CurrentRequestUserMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'track.middleware.TrackMiddleware',
# Allows us to dark-launch particular languages
'dark_lang.middleware.DarkLangMiddleware',
'embargo.middleware.EmbargoMiddleware',
# Detects user-requested locale from 'accept-language' header in http request
'django.middleware.locale.LocaleMiddleware',
'django.middleware.transaction.TransactionMiddleware',
# needs to run after locale middleware (or anything that modifies the request context)
'edxmako.middleware.MakoMiddleware',
# catches any uncaught RateLimitExceptions and returns a 403 instead of a 500
'ratelimitbackend.middleware.RateLimitMiddleware',
# for expiring inactive sessions
'session_inactivity_timeout.middleware.SessionInactivityTimeout',
# use Django built in clickjacking protection
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# Clickjacking protection can be enabled by setting this to 'DENY'
X_FRAME_OPTIONS = 'ALLOW'
############# XBlock Configuration ##########
# This should be moved into an XBlock Runtime/Application object
# once the responsibility of XBlock creation is moved out of modulestore - cpennington
XBLOCK_MIXINS = (LmsBlockMixin, CmsBlockMixin, InheritanceMixin, XModuleMixin)
# Allow any XBlock in Studio
# You should also enable the ALLOW_ALL_ADVANCED_COMPONENTS feature flag, so that
# xblocks can be added via advanced settings
XBLOCK_SELECT_FUNCTION = prefer_xmodules
############################ SIGNAL HANDLERS ################################
# This is imported to register the exception signal handling that logs exceptions
import monitoring.exceptions # noqa
############################ DJANGO_BUILTINS ################################
# Change DEBUG/TEMPLATE_DEBUG in your environment settings files, not here
DEBUG = False
TEMPLATE_DEBUG = False
# Site info
SITE_ID = 1
SITE_NAME = "0.0.0.0:8001"
HTTPS = 'on'
ROOT_URLCONF = 'cms.urls'
IGNORABLE_404_ENDS = ('favicon.ico',)
# Email
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.126.com'
EMAIL_PORT = 25
EMAIL_USE_TLS = False
EMAIL_HOST_USER = 'xiaodunxin'
EMAIL_HOST_PASSWORD = '123456qr'
DEFAULT_FROM_EMAIL = '[email protected]'
DEFAULT_FEEDBACK_EMAIL = '[email protected]'
SERVER_EMAIL = '[email protected]'
ADMINS = ()
MANAGERS = ADMINS
# Static content
STATIC_URL = '/static/' + git.revision + "/"
ADMIN_MEDIA_PREFIX = '/static/admin/'
STATIC_ROOT = ENV_ROOT / "staticfiles" / git.revision
STATICFILES_DIRS = [
COMMON_ROOT / "static",
PROJECT_ROOT / "static",
LMS_ROOT / "static",
# This is how you would use the textbook images locally
# ("book", ENV_ROOT / "book_images"),
]
# Locale/Internationalization
TIME_ZONE = 'Asia/Shanghai' # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
LANGUAGE_CODE = 'zh-cn' # http://www.i18nguy.com/unicode/language-identifiers.html
SITE_NAME = 'mooc.diandiyun.com:18010'
LANGUAGES = lms.envs.common.LANGUAGES
USE_I18N = True
USE_L10N = True
# Localization strings (e.g. django.po) are under this directory
LOCALE_PATHS = (REPO_ROOT + '/conf/locale',) # edx-platform/conf/locale/
# Messages
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
# If this is true, random scores will be generated for the purpose of debugging the profile graphs
GENERATE_PROFILE_SCORES = False
############################### Pipeline #######################################
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
from rooted_paths import rooted_glob
PIPELINE_CSS = {
'style-vendor': {
'source_filenames': [
'css/vendor/normalize.css',
'css/vendor/font-awesome.css',
'css/vendor/html5-input-polyfills/number-polyfill.css',
'js/vendor/CodeMirror/codemirror.css',
'css/vendor/ui-lightness/jquery-ui-1.8.22.custom.css',
'css/vendor/jquery.qtip.min.css',
'js/vendor/markitup/skins/simple/style.css',
'js/vendor/markitup/sets/wiki/style.css',
],
'output_filename': 'css/cms-style-vendor.css',
},
'style-app': {
'source_filenames': [
'sass/style-app.css',
],
'output_filename': 'css/cms-style-app.css',
},
'style-app-extend1': {
'source_filenames': [
'sass/style-app-extend1.css',
],
'output_filename': 'css/cms-style-app-extend1.css',
},
'style-xmodule': {
'source_filenames': [
'sass/style-xmodule.css',
],
'output_filename': 'css/cms-style-xmodule.css',
},
'style-calendar-vendor': {
'source_filenames': [
'css/vendor/fullcalendar/fullcalendar.css',
'css/vendor/fullcalendar/fullcalendar_s.css',
'css/vendor/fullcalendar/fullcalendar.print.css',
],
'output_filename': 'css/lms-style-fullcalendar-vendor.css',
}
}
fullcalendar_vendor_js = [
'js/vendor/fullcalendar/moment.min.js',
'js/vendor/fullcalendar/fullcalendar.min.js',
'js/vendor/fullcalendar/jquery-ui.custom.min.js',
'js/vendor/fullcalendar/lang-all.js',
]
# test_order: Determines the position of this chunk of javascript on
# the jasmine test page
PIPELINE_JS = {
'module-js': {
'source_filenames': (
rooted_glob(COMMON_ROOT / 'static/', 'xmodule/descriptors/js/*.js') +
rooted_glob(COMMON_ROOT / 'static/', 'xmodule/modules/js/*.js') +
rooted_glob(COMMON_ROOT / 'static/', 'coffee/src/discussion/*.js')
),
'output_filename': 'js/cms-modules.js',
'test_order': 1
},
'calendar_vendor': {
'source_filenames': fullcalendar_vendor_js,
'output_filename': 'js/lms-fullcalendar_vendor.js',
'test_order': 0,
},
}
PIPELINE_COMPILERS = (
'pipeline.compilers.coffee.CoffeeScriptCompiler',
)
PIPELINE_CSS_COMPRESSOR = None
PIPELINE_JS_COMPRESSOR = None
STATICFILES_IGNORE_PATTERNS = (
"*.py",
"*.pyc"
# it would be nice if we could do, for example, "**/*.scss",
# but these strings get passed down to the `fnmatch` module,
# which doesn't support that. :(
# http://docs.python.org/2/library/fnmatch.html
"sass/*.scss",
"sass/*/*.scss",
"sass/*/*/*.scss",
"sass/*/*/*/*.scss",
"coffee/*.coffee",
"coffee/*/*.coffee",
"coffee/*/*/*.coffee",
"coffee/*/*/*/*.coffee",
# Symlinks used by js-test-tool
"xmodule_js",
"common_static",
)
PIPELINE_YUI_BINARY = 'yui-compressor'
################################# CELERY ######################################
# Message configuration
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_MESSAGE_COMPRESSION = 'gzip'
# Results configuration
CELERY_IGNORE_RESULT = False
CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True
# Events configuration
CELERY_TRACK_STARTED = True
CELERY_SEND_EVENTS = True
CELERY_SEND_TASK_SENT_EVENT = True
# Exchange configuration
CELERY_DEFAULT_EXCHANGE = 'edx.core'
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
# Queues configuration
HIGH_PRIORITY_QUEUE = 'edx.core.high'
DEFAULT_PRIORITY_QUEUE = 'edx.core.default'
LOW_PRIORITY_QUEUE = 'edx.core.low'
CELERY_QUEUE_HA_POLICY = 'all'
CELERY_CREATE_MISSING_QUEUES = True
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
CELERY_QUEUES = {
HIGH_PRIORITY_QUEUE: {},
LOW_PRIORITY_QUEUE: {},
DEFAULT_PRIORITY_QUEUE: {}
}
############################## Video ##########################################
# URL to test YouTube availability
YOUTUBE_TEST_URL = 'https://gdata.youtube.com/feeds/api/videos/'
############################ APPS #####################################
INSTALLED_APPS = (
# Standard apps
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'djcelery',
'south',
'method_override',
# Database-backed configuration
'config_models',
# Monitor the status of services
'service_status',
# Testing
'django_nose',
# For CMS
'contentstore',
'course_creators',
'student', # misleading name due to sharing with lms
'course_groups', # not used in cms (yet), but tests run
# Tracking
'track',
'eventtracking.django',
# Monitoring
'datadog',
# For asset pipelining
'edxmako',
'pipeline',
'staticfiles',
'static_replace',
# comment common
'django_comment_common',
# for course creator table
'django.contrib.admin',
# XBlocks containing migrations
'mentoring',
# for managing course modes
'course_modes',
# Dark-launching languages
'dark_lang',
# Student identity reverification
'reverification',
# User preferences
'user_api',
'django_openid_auth',
'embargo',
)
################# EDX MARKETING SITE ##################################
EDXMKTG_COOKIE_NAME = 'edxloggedin'
MKTG_URLS = {}
MKTG_URL_LINK_MAP = {
}
COURSES_WITH_UNSAFE_CODE = []
############################## EVENT TRACKING #################################
TRACK_MAX_EVENT = 10000
TRACKING_BACKENDS = {
'logger': {
'ENGINE': 'track.backends.logger.LoggerBackend',
'OPTIONS': {
'name': 'tracking'
}
}
}
#### PASSWORD POLICY SETTINGS #####
PASSWORD_MIN_LENGTH = None
PASSWORD_MAX_LENGTH = None
PASSWORD_COMPLEXITY = {}
PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = None
PASSWORD_DICTIONARY = []
# We're already logging events, and we don't want to capture user
# names/passwords. Heartbeat events are likely not interesting.
TRACKING_IGNORE_URL_PATTERNS = [r'^/event', r'^/login', r'^/heartbeat']
TRACKING_ENABLED = True
# Current youtube api for requesting transcripts.
# for example: http://video.google.com/timedtext?lang=en&v=j_jEn79vS3g.
YOUTUBE_API = {
'url': "http://video.google.com/timedtext",
'params': {'lang': 'en', 'v': 'set_youtube_id_of_11_symbols_here'}
}
##### ACCOUNT LOCKOUT DEFAULT PARAMETERS #####
MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = 5
MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = 15 * 60
### JSdraw (only installed in some instances)
try:
import edx_jsdraw
except ImportError:
pass
else:
INSTALLED_APPS += ('edx_jsdraw',)
############## SSO KEY ################
SSO_KEY = "SSOFOUNDER"
############## user auth ##############
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.UnsaltedMD5PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.UnsaltedSHA1PasswordHasher',
'django.contrib.auth.hashers.CryptPasswordHasher'
)
############## BUSINESS SYSTEM #################
XIAODUN_BACK_HOST = 'http://busi.xiaodun.cn/app'
############## video mettings ##################
VEDIO_MEETING_DOMAIN = "http://passport.guoshi.com/mp"
############## wenjuan domain ##################
WENJUAN_DOMAIN = "http://apitest.wenjuan.com:8000"
####### wenjuan secret_key #######
WENJUAN_SECKEY = "9d15a674a6e621058f1ea9171413b7c0"
|
agpl-3.0
|
wanderine/nipype
|
nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py
|
9
|
1130
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.afni.preprocess import AFNItoNIFTI
def test_AFNItoNIFTI_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='%s',
copyfile=False,
mandatory=True,
position=-1,
),
out_file=dict(argstr='-prefix %s',
name_source='in_file',
name_template='%s.nii',
),
outputtype=dict(),
terminal_output=dict(nohash=True,
),
)
inputs = AFNItoNIFTI.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_AFNItoNIFTI_outputs():
output_map = dict(out_file=dict(),
)
outputs = AFNItoNIFTI.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
bsd-3-clause
|
HonzaKral/django
|
tests/generic_inline_admin/tests.py
|
15
|
22206
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.contrib import admin
from django.contrib.admin.sites import AdminSite
from django.contrib.auth.models import User
from django.contrib.contenttypes.admin import GenericTabularInline
from django.contrib.contenttypes.forms import generic_inlineformset_factory
from django.core.urlresolvers import reverse
from django.forms.formsets import DEFAULT_MAX_NUM
from django.forms.models import ModelForm
from django.test import (
RequestFactory, SimpleTestCase, TestCase, override_settings,
)
from .admin import MediaInline, MediaPermanentInline, site as admin_site
from .models import Category, Episode, EpisodePermanent, Media
class TestDataMixin(object):
@classmethod
def setUpTestData(cls):
# password = "secret"
User.objects.create(
pk=100, username='super', first_name='Super', last_name='User', email='[email protected]',
password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158', is_active=True, is_superuser=True,
is_staff=True, last_login=datetime.datetime(2007, 5, 30, 13, 20, 10),
date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
# Set DEBUG to True to ensure {% include %} will raise exceptions.
# That is how inlines are rendered and #9498 will bubble up if it is an issue.
@override_settings(
DEBUG=True,
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="generic_inline_admin.urls",
)
class GenericAdminViewTest(TestDataMixin, TestCase):
def setUp(self):
self.client.login(username='super', password='secret')
# Can't load content via a fixture (since the GenericForeignKey
# relies on content type IDs, which will vary depending on what
# other tests have been run), thus we do it here.
e = Episode.objects.create(name='This Week in Django')
self.episode_pk = e.pk
m = Media(content_object=e, url='http://example.com/podcast.mp3')
m.save()
self.mp3_media_pk = m.pk
m = Media(content_object=e, url='http://example.com/logo.png')
m.save()
self.png_media_pk = m.pk
def test_basic_add_GET(self):
"""
A smoke test to ensure GET on the add_view works.
"""
response = self.client.get(reverse('admin:generic_inline_admin_episode_add'))
self.assertEqual(response.status_code, 200)
def test_basic_edit_GET(self):
"""
A smoke test to ensure GET on the change_view works.
"""
response = self.client.get(
reverse('admin:generic_inline_admin_episode_change', args=(self.episode_pk,))
)
self.assertEqual(response.status_code, 200)
def test_basic_add_POST(self):
"""
A smoke test to ensure POST on add_view works.
"""
post_data = {
"name": "This Week in Django",
# inline data
"generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "1",
"generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "0",
"generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0",
}
response = self.client.post(reverse('admin:generic_inline_admin_episode_add'), post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_basic_edit_POST(self):
"""
A smoke test to ensure POST on edit_view works.
"""
post_data = {
"name": "This Week in Django",
# inline data
"generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "3",
"generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "2",
"generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0",
"generic_inline_admin-media-content_type-object_id-0-id": "%d" % self.mp3_media_pk,
"generic_inline_admin-media-content_type-object_id-0-url": "http://example.com/podcast.mp3",
"generic_inline_admin-media-content_type-object_id-1-id": "%d" % self.png_media_pk,
"generic_inline_admin-media-content_type-object_id-1-url": "http://example.com/logo.png",
"generic_inline_admin-media-content_type-object_id-2-id": "",
"generic_inline_admin-media-content_type-object_id-2-url": "",
}
url = reverse('admin:generic_inline_admin_episode_change', args=(self.episode_pk,))
response = self.client.post(url, post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_generic_inline_formset(self):
EpisodeMediaFormSet = generic_inlineformset_factory(Media, can_delete=False, exclude=['description', 'keywords'], extra=3)
e = Episode.objects.get(name='This Week in Django')
# Works with no queryset
formset = EpisodeMediaFormSet(instance=e)
self.assertEqual(len(formset.forms), 5)
self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="url" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/podcast.mp3" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.mp3_media_pk)
self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="url" name="generic_inline_admin-media-content_type-object_id-1-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>' % self.png_media_pk)
self.assertHTMLEqual(formset.forms[2].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-2-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-2-url" type="url" name="generic_inline_admin-media-content_type-object_id-2-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-2-id" id="id_generic_inline_admin-media-content_type-object_id-2-id" /></p>')
# A queryset can be used to alter display ordering
formset = EpisodeMediaFormSet(instance=e, queryset=Media.objects.order_by('url'))
self.assertEqual(len(formset.forms), 5)
self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="url" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.png_media_pk)
self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="url" name="generic_inline_admin-media-content_type-object_id-1-url" value="http://example.com/podcast.mp3" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>' % self.mp3_media_pk)
self.assertHTMLEqual(formset.forms[2].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-2-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-2-url" type="url" name="generic_inline_admin-media-content_type-object_id-2-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-2-id" id="id_generic_inline_admin-media-content_type-object_id-2-id" /></p>')
# Works with a queryset that omits items
formset = EpisodeMediaFormSet(instance=e, queryset=Media.objects.filter(url__endswith=".png"))
self.assertEqual(len(formset.forms), 4)
self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="url" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.png_media_pk)
self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="url" name="generic_inline_admin-media-content_type-object_id-1-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>')
def test_generic_inline_formset_factory(self):
# Regression test for #10522.
inline_formset = generic_inlineformset_factory(Media,
exclude=('url',))
# Regression test for #12340.
e = Episode.objects.get(name='This Week in Django')
formset = inline_formset(instance=e)
self.assertTrue(formset.get_queryset().ordered)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="generic_inline_admin.urls")
class GenericInlineAdminParametersTest(TestDataMixin, TestCase):
def setUp(self):
self.client.login(username='super', password='secret')
self.factory = RequestFactory()
def _create_object(self, model):
"""
Create a model with an attached Media object via GFK. We can't
load content via a fixture (since the GenericForeignKey relies on
content type IDs, which will vary depending on what other tests
have been run), thus we do it here.
"""
e = model.objects.create(name='This Week in Django')
Media.objects.create(content_object=e, url='http://example.com/podcast.mp3')
return e
def test_no_param(self):
"""
With one initial form, extra (default) at 3, there should be 4 forms.
"""
e = self._create_object(Episode)
response = self.client.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
formset = response.context['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 4)
self.assertEqual(formset.initial_form_count(), 1)
def test_extra_param(self):
"""
With extra=0, there should be one form.
"""
class ExtraInline(GenericTabularInline):
model = Media
extra = 0
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [ExtraInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 1)
self.assertEqual(formset.initial_form_count(), 1)
def test_max_num_param(self):
"""
With extra=5 and max_num=2, there should be only 2 forms.
"""
class MaxNumInline(GenericTabularInline):
model = Media
extra = 5
max_num = 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [MaxNumInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 2)
self.assertEqual(formset.initial_form_count(), 1)
def test_min_num_param(self):
"""
With extra=3 and min_num=2, there should be five forms.
"""
class MinNumInline(GenericTabularInline):
model = Media
extra = 3
min_num = 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [MinNumInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 5)
self.assertEqual(formset.initial_form_count(), 1)
def test_get_extra(self):
class GetExtraInline(GenericTabularInline):
model = Media
extra = 4
def get_extra(self, request, obj):
return 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [GetExtraInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.extra, 2)
def test_get_min_num(self):
class GetMinNumInline(GenericTabularInline):
model = Media
min_num = 5
def get_min_num(self, request, obj):
return 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [GetMinNumInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.min_num, 2)
def test_get_max_num(self):
class GetMaxNumInline(GenericTabularInline):
model = Media
extra = 5
def get_max_num(self, request, obj):
return 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [GetMaxNumInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.max_num, 2)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="generic_inline_admin.urls")
class GenericInlineAdminWithUniqueTogetherTest(TestDataMixin, TestCase):
def setUp(self):
self.client.login(username='super', password='secret')
def test_add(self):
category_id = Category.objects.create(name='male').pk
post_data = {
"name": "John Doe",
# inline data
"generic_inline_admin-phonenumber-content_type-object_id-TOTAL_FORMS": "1",
"generic_inline_admin-phonenumber-content_type-object_id-INITIAL_FORMS": "0",
"generic_inline_admin-phonenumber-content_type-object_id-MAX_NUM_FORMS": "0",
"generic_inline_admin-phonenumber-content_type-object_id-0-id": "",
"generic_inline_admin-phonenumber-content_type-object_id-0-phone_number": "555-555-5555",
"generic_inline_admin-phonenumber-content_type-object_id-0-category": "%s" % category_id,
}
response = self.client.get(reverse('admin:generic_inline_admin_contact_add'))
self.assertEqual(response.status_code, 200)
response = self.client.post(reverse('admin:generic_inline_admin_contact_add'), post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
@override_settings(ROOT_URLCONF="generic_inline_admin.urls")
class NoInlineDeletionTest(SimpleTestCase):
def test_no_deletion(self):
inline = MediaPermanentInline(EpisodePermanent, admin_site)
fake_request = object()
formset = inline.get_formset(fake_request)
self.assertFalse(formset.can_delete)
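# Minimal request/user stubs that grant every permission; shared by the
# GenericInlineModelAdmin tests below.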
class MockRequest(object):
pass
class MockSuperUser(object):
def has_perm(self, perm):
return True
request = MockRequest()
request.user = MockSuperUser()
@override_settings(ROOT_URLCONF="generic_inline_admin.urls")
class GenericInlineModelAdminTest(SimpleTestCase):
def setUp(self):
self.site = AdminSite()
def test_get_formset_kwargs(self):
media_inline = MediaInline(Media, AdminSite())
# Create a formset with default arguments
formset = media_inline.get_formset(request)
self.assertEqual(formset.max_num, DEFAULT_MAX_NUM)
self.assertEqual(formset.can_order, False)
# Create a formset with custom keyword arguments
formset = media_inline.get_formset(request, max_num=100, can_order=True)
self.assertEqual(formset.max_num, 100)
self.assertEqual(formset.can_order, True)
def test_custom_form_meta_exclude_with_readonly(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is respected when
used in conjunction with `GenericInlineModelAdmin.readonly_fields`
and when no `ModelAdmin.exclude` is defined.
"""
class MediaForm(ModelForm):
class Meta:
model = Media
exclude = ['url']
class MediaInline(GenericTabularInline):
readonly_fields = ['description']
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['keywords', 'id', 'DELETE'])
def test_custom_form_meta_exclude(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is respected by
`GenericInlineModelAdmin.get_formset`, and overridden if
`ModelAdmin.exclude` or `GenericInlineModelAdmin.exclude` are defined.
Refs #15907.
"""
# First with `GenericInlineModelAdmin` -----------------
class MediaForm(ModelForm):
class Meta:
model = Media
exclude = ['url']
class MediaInline(GenericTabularInline):
exclude = ['description']
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['url', 'keywords', 'id', 'DELETE'])
# Then, only with `ModelForm` -----------------
class MediaInline(GenericTabularInline):
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['description', 'keywords', 'id', 'DELETE'])
def test_get_fieldsets(self):
# Test that get_fieldsets is called when figuring out form fields.
# Refs #18681.
class MediaForm(ModelForm):
class Meta:
model = Media
fields = '__all__'
class MediaInline(GenericTabularInline):
form = MediaForm
model = Media
can_delete = False
def get_fieldsets(self, request, obj=None):
return [(None, {'fields': ['url', 'description']})]
ma = MediaInline(Media, self.site)
form = ma.get_formset(None).form
self.assertEqual(form._meta.fields, ['url', 'description'])
def test_get_formsets_with_inlines_returns_tuples(self):
"""
Ensure that get_formsets_with_inlines() returns the correct tuples.
"""
class MediaForm(ModelForm):
class Meta:
model = Media
exclude = ['url']
class MediaInline(GenericTabularInline):
form = MediaForm
model = Media
class AlternateInline(GenericTabularInline):
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
AlternateInline, MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
inlines = ma.get_inline_instances(request)
for (formset, inline), other_inline in zip(ma.get_formsets_with_inlines(request), inlines):
self.assertIsInstance(formset, other_inline.get_formset(request).__class__)
|
bsd-3-clause
|
insta-code1/Instafit-ecommerce-Django
|
Scripts/enhancer.py
|
1
|
1522
|
#!d:\e-commers\scripts\python.exe
#
# The Python Imaging Library
# $Id$
#
# this demo script creates four windows containing an image and a slider.
# drag the slider to modify the image.
#
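# usage (the image path is read from sys.argv[1]):
#     python enhancer.py picture.jpg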
try:
from tkinter import Tk, Toplevel, Frame, Label, Scale, HORIZONTAL
except ImportError:
from Tkinter import Tk, Toplevel, Frame, Label, Scale, HORIZONTAL
from PIL import Image, ImageTk, ImageEnhance
import sys
#
# enhancer widget
class Enhance(Frame):
def __init__(self, master, image, name, enhancer, lo, hi):
Frame.__init__(self, master)
# set up the image
self.tkim = ImageTk.PhotoImage(image.mode, image.size)
self.enhancer = enhancer(image)
self.update("1.0") # normalize
# image window
Label(self, image=self.tkim).pack()
# scale
s = Scale(self, label=name, orient=HORIZONTAL,
from_=lo, to=hi, resolution=0.01,
command=self.update)
s.set(self.value)
s.pack()
def update(self, value):
self.value = float(value)  # the Scale widget passes its position as a string
self.tkim.paste(self.enhancer.enhance(self.value))
#
# main
root = Tk()
im = Image.open(sys.argv[1])
im.thumbnail((200, 200))
Enhance(root, im, "Color", ImageEnhance.Color, 0.0, 4.0).pack()
Enhance(Toplevel(), im, "Sharpness", ImageEnhance.Sharpness, -2.0, 2.0).pack()
Enhance(Toplevel(), im, "Brightness", ImageEnhance.Brightness, -1.0, 3.0).pack()
Enhance(Toplevel(), im, "Contrast", ImageEnhance.Contrast, -1.0, 3.0).pack()
root.mainloop()
|
mit
|
thesuperzapper/tensorflow
|
tensorflow/contrib/learn/python/learn/learn_io/data_feeder_test.py
|
71
|
12923
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `DataFeeder`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
# pylint: enable=wildcard-import
class DataFeederTest(test.TestCase):
# pylint: disable=undefined-variable
"""Tests for `DataFeeder`."""
def _wrap_dict(self, data, prepend=''):
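# Duplicate `data` under two keys so each test can exercise the dict-input
# code path alongside the plain-array path.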
return {prepend + '1': data, prepend + '2': data}
def _assert_raises(self, input_data):
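# 'annot convert' intentionally drops the first letter so the regexp matches
# both "Cannot convert" and "cannot convert" messages.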
with self.assertRaisesRegexp(TypeError, 'annot convert'):
data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)
def test_input_uint32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint32)
self._assert_raises(data)
self._assert_raises(self._wrap_dict(data))
def test_input_uint64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint64)
self._assert_raises(data)
self._assert_raises(self._wrap_dict(data))
def _assert_dtype(self, expected_np_dtype, expected_tf_dtype, input_data):
feeder = data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)
if isinstance(input_data, dict):
for k, v in list(feeder.input_dtype.items()):
self.assertEqual(expected_np_dtype, v)
else:
self.assertEqual(expected_np_dtype, feeder.input_dtype)
with ops.Graph().as_default() as g, self.test_session(g):
inp, _ = feeder.input_builder()
if isinstance(inp, dict):
for k, v in list(inp.items()):
self.assertEqual(expected_tf_dtype, v.dtype)
else:
self.assertEqual(expected_tf_dtype, inp.dtype)
def test_input_int8(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int8)
self._assert_dtype(np.int8, dtypes.int8, data)
self._assert_dtype(np.int8, dtypes.int8, self._wrap_dict(data))
def test_input_int16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int16)
self._assert_dtype(np.int16, dtypes.int16, data)
self._assert_dtype(np.int16, dtypes.int16, self._wrap_dict(data))
def test_input_int32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int32)
self._assert_dtype(np.int32, dtypes.int32, data)
self._assert_dtype(np.int32, dtypes.int32, self._wrap_dict(data))
def test_input_int64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int64)
self._assert_dtype(np.int64, dtypes.int64, data)
self._assert_dtype(np.int64, dtypes.int64, self._wrap_dict(data))
def test_input_uint8(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint8)
self._assert_dtype(np.uint8, dtypes.uint8, data)
self._assert_dtype(np.uint8, dtypes.uint8, self._wrap_dict(data))
def test_input_uint16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint16)
self._assert_dtype(np.uint16, dtypes.uint16, data)
self._assert_dtype(np.uint16, dtypes.uint16, self._wrap_dict(data))
def test_input_float16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float16)
self._assert_dtype(np.float16, dtypes.float16, data)
self._assert_dtype(np.float16, dtypes.float16, self._wrap_dict(data))
def test_input_float32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float32)
self._assert_dtype(np.float32, dtypes.float32, data)
self._assert_dtype(np.float32, dtypes.float32, self._wrap_dict(data))
def test_input_float64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float64)
self._assert_dtype(np.float64, dtypes.float64, data)
self._assert_dtype(np.float64, dtypes.float64, self._wrap_dict(data))
def test_input_bool(self):
data = np.array([[False for _ in xrange(2)] for _ in xrange(2)])
self._assert_dtype(np.bool, dtypes.bool, data)
self._assert_dtype(np.bool, dtypes.bool, self._wrap_dict(data))
def test_input_string(self):
input_data = np.array([['str%d' % i for i in xrange(2)] for _ in xrange(2)])
self._assert_dtype(input_data.dtype, dtypes.string, input_data)
self._assert_dtype(input_data.dtype, dtypes.string,
self._wrap_dict(input_data))
def _assertAllClose(self, src, dest, src_key_of=None, src_prop=None):
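# Compare `src` against `dest`. When `src_prop` is set (e.g. 'name'), that
# attribute is read from each value first; when `src_key_of` is set (e.g. a
# feed_dict), the result is looked up there. Dict inputs are checked per key.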
def func(x):
val = getattr(x, src_prop) if src_prop else x
return val if src_key_of is None else src_key_of[val]
if isinstance(src, dict):
for k in list(src.keys()):
self.assertAllClose(func(src[k]), dest)
else:
self.assertAllClose(func(src), dest)
def test_unsupervised(self):
def func(feeder):
with self.test_session():
inp, _ = feeder.input_builder()
feed_dict_fn = feeder.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[1, 2]], feed_dict, 'name')
data = np.matrix([[1, 2], [2, 3], [3, 4]])
func(data_feeder.DataFeeder(data, None, n_classes=0, batch_size=1))
func(
data_feeder.DataFeeder(
self._wrap_dict(data), None, n_classes=0, batch_size=1))
def test_data_feeder_regression(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(out, [2, 1], feed_dict, 'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([1, 2])
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=3))
def test_epoch(self):
def func(feeder):
with self.test_session():
feeder.input_builder()
epoch = feeder.make_epoch_variable()
feed_dict_fn = feeder.get_feed_dict_fn()
# First input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Second input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Third input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Back to the first input again, so new epoch.
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [1])
data = np.matrix([[1, 2], [2, 3], [3, 4]])
labels = np.array([0, 0, 1])
func(data_feeder.DataFeeder(data, labels, n_classes=0, batch_size=1))
func(
data_feeder.DataFeeder(
self._wrap_dict(data, 'in'),
self._wrap_dict(labels, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=1))
def test_data_feeder_multioutput_regression(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(out, [[3, 4], [1, 2]], feed_dict, 'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([[1, 2], [3, 4]])
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=2))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=2))
def test_data_feeder_multioutput_classification(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(
out, [[[0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]],
[[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0]]], feed_dict,
'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([[0, 1, 2], [2, 3, 4]])
func(data_feeder.DataFeeder(x, y, n_classes=5, batch_size=2))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(5, 'out'),
batch_size=2))
def test_streaming_data_feeder(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[[1, 2]], [[3, 4]]], feed_dict, 'name')
self._assertAllClose(out, [[[1], [2]], [[2], [2]]], feed_dict, 'name')
def x_iter(wrap_dict=False):
yield np.array([[1, 2]]) if not wrap_dict else self._wrap_dict(
np.array([[1, 2]]), 'in')
yield np.array([[3, 4]]) if not wrap_dict else self._wrap_dict(
np.array([[3, 4]]), 'in')
def y_iter(wrap_dict=False):
yield np.array([[1], [2]]) if not wrap_dict else self._wrap_dict(
np.array([[1], [2]]), 'out')
yield np.array([[2], [2]]) if not wrap_dict else self._wrap_dict(
np.array([[2], [2]]), 'out')
func(
data_feeder.StreamingDataFeeder(
x_iter(), y_iter(), n_classes=0, batch_size=2))
func(
data_feeder.StreamingDataFeeder(
x_iter(True),
y_iter(True),
n_classes=self._wrap_dict(0, 'out'),
batch_size=2))
# Test non-full batches.
func(
data_feeder.StreamingDataFeeder(
x_iter(), y_iter(), n_classes=0, batch_size=10))
func(
data_feeder.StreamingDataFeeder(
x_iter(True),
y_iter(True),
n_classes=self._wrap_dict(0, 'out'),
batch_size=10))
def test_dask_data_feeder(self):
if HAS_PANDAS and HAS_DASK:
x = pd.DataFrame(
dict(
a=np.array([.1, .3, .4, .6, .2, .1, .6]),
b=np.array([.7, .8, .1, .2, .5, .3, .9])))
x = dd.from_pandas(x, npartitions=2)
y = pd.DataFrame(dict(labels=np.array([1, 0, 2, 1, 0, 1, 2])))
y = dd.from_pandas(y, npartitions=2)
# TODO(ipolosukhin): Remove or restore this.
# x = extract_dask_data(x)
# y = extract_dask_labels(y)
df = data_feeder.DaskDataFeeder(x, y, n_classes=2, batch_size=2)
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[inp.name], [[0.40000001, 0.1],
[0.60000002, 0.2]])
self.assertAllClose(feed_dict[out.name], [[0., 0., 1.], [0., 1., 0.]])
def test_hdf5_data_feeder(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(out, [2, 1], feed_dict, 'name')
try:
import h5py # pylint: disable=g-import-not-at-top
x = np.matrix([[1, 2], [3, 4]])
y = np.array([1, 2])
h5f = h5py.File('test_hdf5.h5', 'w')
h5f.create_dataset('x', data=x)
h5f.create_dataset('y', data=y)
h5f.close()
h5f = h5py.File('test_hdf5.h5', 'r')
x = h5f['x']
y = h5f['y']
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=3))
except ImportError:
print("Skipped test for hdf5 since it's not installed.")
class SetupPredictDataFeederTest(DataFeederTest):
"""Tests for `DataFeeder.setup_predict_data_feeder`."""
def test_iterable_data(self):
# pylint: disable=undefined-variable
def func(df):
self._assertAllClose(six.next(df), [[1, 2], [3, 4]])
self._assertAllClose(six.next(df), [[5, 6]])
data = [[1, 2], [3, 4], [5, 6]]
x = iter(data)
x_dict = iter([self._wrap_dict(v) for v in iter(data)])
func(data_feeder.setup_predict_data_feeder(x, batch_size=2))
func(data_feeder.setup_predict_data_feeder(x_dict, batch_size=2))
if __name__ == '__main__':
test.main()
|
apache-2.0
|
creyesp/RF_Estimation
|
Clustering/clustering/gmm.py
|
2
|
6063
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# gmm.py
#
# Copyright 2014 Carlos "casep" Sepulveda <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# Performs Gaussian Mixture Model (GMM) clustering using scikit-learn
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '../..','LIB'))
import rfestimationLib as rfe
import argparse #argument parsing
import numpy as np
import scipy.ndimage
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from sklearn import mixture
from sklearn import metrics
clustersColours = ['#fcfa00', '#ff0000', '#820c2c', '#ff006f', '#af00ff','#0200ff','#008dff','#00e8ff','#0c820e','#28ea04','#ea8404','#c8628f','#6283ff','#5b6756','#0c8248','k','#820cff','#932c11','#002c11','#829ca7']
def main():
parser = argparse.ArgumentParser(prog='gmm.py',
description='Performs Gaussian Mixture Model (GMM) clustering using scikit-learn',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--sourceFolder',
help='Source folder',
type=str, required=True)
parser.add_argument('--outputFolder',
help='Output folder',
type=str, required=True)
parser.add_argument('--clustersNumber',
help='Number of clusters',
type=int, default='5', choices=[3,4,5,6,7,8,9,10,11,12,13,14,15], required=False)
parser.add_argument('--framesNumber',
help='Number of frames used in STA analysis',
type=int, default='20', required=False)
parser.add_argument('--blockSize',
help='Size of each block in micrometres',
type=int, default='50', required=False)
args = parser.parse_args()
#Source folder of the files with the timestamps
sourceFolder = rfe.fixPath(args.sourceFolder)
if not os.path.exists(sourceFolder):
print ''
		print 'Source folder does not exist: ' + sourceFolder
sys.exit()
#Output folder for the graphics
outputFolder = rfe.fixPath(args.outputFolder)
if not os.path.exists(outputFolder):
try:
os.makedirs(outputFolder)
		except OSError:
print ''
print 'Unable to create folder ' + outputFolder
sys.exit()
#Clusters number for the kmeans algorithm
clustersNumber = args.clustersNumber
#Frames used in STA analysis
framesNumber = args.framesNumber
#Size of each block in micrometres
blockSize = args.blockSize
	#dataCluster stores the data to be used for the clustering process.
	#Its width equals the number of frames (the time component) plus 7,
	#as we incorporate the 2 radii of the ellipse, the same 2 radii
	#in micrometres, the x position, the y position and the angle.
dataCluster = np.zeros((1,framesNumber+7))
units=[]
dato=np.zeros((1,1))
for unitFile in os.listdir(sourceFolder):
if os.path.isdir(sourceFolder+unitFile):
unitName = unitFile.rsplit('_', 1)[0]
dataUnit, coordinates = rfe.loadSTACurve(sourceFolder,unitFile,unitName)
xSize = dataUnit.shape[0]
ySize = dataUnit.shape[1]
fitResult = rfe.loadFitMatrix(sourceFolder,unitFile)
#should we use the not-gaussian-fitted data for clustering?
dataUnitGauss = scipy.ndimage.gaussian_filter(dataUnit[coordinates[0][0],[coordinates[1][0]],:],2)
#A radius of the RF ellipse, adjusted to micrometres
dato[0] = blockSize * fitResult[0][2]
dataUnitCompleta = np.concatenate((dataUnitGauss,dato),1)
#B radius of the RF ellipse, adjusted to micrometres
dato[0] = blockSize * fitResult[0][3]
dataUnitCompleta = np.concatenate((dataUnitCompleta,dato),1)
#A radius of the RF ellipse
dato[0] = fitResult[0][2]
dataUnitCompleta = np.concatenate((dataUnitCompleta,dato),1)
#B radius of the RF ellipse
dato[0] = fitResult[0][3]
dataUnitCompleta = np.concatenate((dataUnitCompleta,dato),1)
#angle of the RF ellipse
dato[0] = fitResult[0][1]
dataUnitCompleta = np.concatenate((dataUnitCompleta,dato),1)
#X coordinate of the RF ellipse
dato[0] = fitResult[0][4]
dataUnitCompleta = np.concatenate((dataUnitCompleta,dato),1)
#Y coordinate of the RF ellipse
dato[0] = fitResult[0][5]
dataUnitCompleta = np.concatenate((dataUnitCompleta,dato),1)
dataCluster = np.append(dataCluster,dataUnitCompleta, axis=0)
units.append(unitName)
# remove the first row of zeroes
dataCluster = dataCluster[1:,:]
data = dataCluster[:,0:framesNumber+2]
gmix = mixture.GMM(n_components=clustersNumber, covariance_type='full')
gmix.fit(data)
labels = gmix.predict(data)
fit = metrics.silhouette_score(data, labels, metric='euclidean')
rfe.graficaCluster(labels, dataCluster[:,0:framesNumber-1], outputFolder+'gmm.png', clustersColours, fit)
# generate graphics of all ellipses
for clusterId in range(clustersNumber):
dataGrilla = np.zeros((1,framesNumber+7))
for unitId in range(dataCluster.shape[0]):
if labels[unitId] == clusterId:
datos=np.zeros((1,framesNumber+7))
datos[0]=dataCluster[unitId,:]
dataGrilla = np.append(dataGrilla,datos, axis=0)
## remove the first row of zeroes
dataGrilla = dataGrilla[1:,:]
rfe.graficaGrilla(dataGrilla, outputFolder+'Grilla_'+str(clusterId)+'.png', clustersColours[clusterId], framesNumber, xSize, ySize)
rfe.graficaCluster(labels, dataGrilla[:,0:framesNumber-1], outputFolder+'cluster_'+str(clusterId)+'.png', clustersColours[clusterId])
rfe.guardaClustersIDs(outputFolder, units, labels, outputFolder+'clusterings.csv')
return 0
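# --- Illustrative extension (not part of the original script) ---
# A minimal sketch of picking the component count by silhouette score instead
# of fixing it via --clustersNumber; best_n_components is a hypothetical
# helper and candidateRange is an assumed parameter name.
def best_n_components(data, candidateRange=range(3, 16)):
	best_n, best_score = None, -1.0
	for n in candidateRange:
		g = mixture.GMM(n_components=n, covariance_type='full')
		g.fit(data)
		score = metrics.silhouette_score(data, g.predict(data), metric='euclidean')
		if score > best_score:
			best_n, best_score = n, score
	return best_n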
if __name__ == '__main__':
main()
|
gpl-2.0
|
txamqp/txamqp
|
src/examples/simple/txconsumer.py
|
1
|
2216
|
from twisted.internet.defer import inlineCallbacks
from twisted.internet import reactor
from twisted.internet.protocol import ClientCreator
from twisted.python import log
from txamqp.protocol import AMQClient
from txamqp.client import TwistedDelegate
import txamqp.spec
@inlineCallbacks
def gotConnection(conn, username, password):
print("Connected to broker.")
yield conn.authenticate(username, password)
print("Authenticated. Ready to receive messages")
chan = yield conn.channel(1)
yield chan.channel_open()
yield chan.queue_declare(queue="chatrooms", durable=True, exclusive=False, auto_delete=False)
yield chan.exchange_declare(exchange="chatservice", type="direct", durable=True, auto_delete=False)
yield chan.queue_bind(queue="chatrooms", exchange="chatservice", routing_key="txamqp_chatroom")
yield chan.basic_consume(queue='chatrooms', no_ack=True, consumer_tag="testtag")
queue = yield conn.queue("testtag")
while True:
msg = yield queue.get()
print('Received: {0} from channel #{1}'.format(msg.content.body, chan.id))
if msg.content.body == "STOP":
break
yield chan.basic_cancel("testtag")
yield chan.channel_close()
chan0 = yield conn.channel(0)
yield chan0.connection_close()
reactor.stop()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description="Example consumer",
usage="%(prog)s localhost 5672 / guest guest ../../specs/standard/amqp0-8.stripped.xml"
)
parser.add_argument('host')
parser.add_argument('port', type=int)
parser.add_argument('vhost')
parser.add_argument('username')
parser.add_argument('password')
parser.add_argument('spec_path')
args = parser.parse_args()
spec = txamqp.spec.load(args.spec_path)
delegate = TwistedDelegate()
d = ClientCreator(reactor, AMQClient, delegate=delegate, vhost=args.vhost,
spec=spec).connectTCP(args.host, args.port)
d.addCallback(gotConnection, args.username, args.password)
def whoops(err):
if reactor.running:
log.err(err)
reactor.stop()
d.addErrback(whoops)
reactor.run()
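# --- Illustrative counterpart (not part of the original example) ---
# A minimal sketch of the publishing side, assuming the same exchange and
# routing key declared by the consumer above; publish_one is a hypothetical
# helper, not part of txamqp itself.
from txamqp.content import Content

@inlineCallbacks
def publish_one(conn, body):
    # Open a throwaway channel, publish a single message, and close it.
    chan = yield conn.channel(2)
    yield chan.channel_open()
    yield chan.basic_publish(exchange="chatservice",
                             routing_key="txamqp_chatroom",
                             content=Content(body))
    yield chan.channel_close()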
|
apache-2.0
|
platformio/platformio-core
|
platformio/debug/exception.py
|
2
|
1138
|
# Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from platformio.exception import PlatformioException, UserSideException
class DebugError(PlatformioException):
pass
class DebugSupportError(DebugError, UserSideException):
MESSAGE = (
"Currently, PlatformIO does not support debugging for `{0}`.\n"
"Please request support at https://github.com/platformio/"
"platformio-core/issues \nor visit -> https://docs.platformio.org"
"/page/plus/debugging.html"
)
class DebugInvalidOptionsError(DebugError, UserSideException):
pass
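# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming PlatformioException formats MESSAGE with the
# positional args passed to the constructor; "atmelavr" is a placeholder
# platform name.
#
# raise DebugSupportError("atmelavr")
# # -> "Currently, PlatformIO does not support debugging for `atmelavr`. ..."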
|
apache-2.0
|
Alkemic/yaCBV
|
yacbv/mixins.py
|
1
|
1041
|
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.http.response import HttpResponseNotAllowed
class HttpMethodRestrictMixin(object):
allowed_methods = [
'options', 'get', 'head', 'post',
'put', 'delete', 'trace', 'connect',
]
def dispatch(self, request, *args, **kwargs):
method_name = request.method.lower()
if method_name not in self.allowed_methods:
return HttpResponseNotAllowed(self.allowed_methods)
        return super(HttpMethodRestrictMixin, self).dispatch(request, *args, **kwargs)
class TemplateMixin(object):
"""In this case, method must return dict, that is used to populate data
in template"""
template_name = None
def dispatch(self, request, *args, **kwargs):
context = super(TemplateMixin, self).dispatch(request, *args, **kwargs)
return render_to_response(
self.template_name,
context,
context_instance=RequestContext(request),
)
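# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of composing the two mixins with a plain dispatching base
# view; _BaseView and ArticleView are hypothetical names invented for this
# example.
class _BaseView(object):
    def dispatch(self, request, *args, **kwargs):
        # Route to the handler named after the HTTP method, e.g. get().
        handler = getattr(self, request.method.lower())
        return handler(request, *args, **kwargs)


class ArticleView(HttpMethodRestrictMixin, TemplateMixin, _BaseView):
    allowed_methods = ['get']
    template_name = 'article.html'

    def get(self, request, *args, **kwargs):
        # TemplateMixin renders template_name with this dict as context.
        return {'title': 'Hello'}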
|
mit
|
slipstream/SlipStreamClient
|
client/src/external/chardet/mbcssm.py
|
289
|
25481
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .enums import MachineState
# BIG5
BIG5_CLS = (
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
4,4,4,4,4,4,4,4, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
4,3,3,3,3,3,3,3, # a0 - a7
3,3,3,3,3,3,3,3, # a8 - af
3,3,3,3,3,3,3,3, # b0 - b7
3,3,3,3,3,3,3,3, # b8 - bf
3,3,3,3,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0 # f8 - ff
)
BIG5_ST = (
MachineState.ERROR,MachineState.START,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,#08-0f
MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START#10-17
)
BIG5_CHAR_LEN_TABLE = (0, 1, 1, 2, 0)
BIG5_SM_MODEL = {'class_table': BIG5_CLS,
'class_factor': 5,
'state_table': BIG5_ST,
'char_len_table': BIG5_CHAR_LEN_TABLE,
'name': 'Big5'}
# CP949
CP949_CLS = (
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,0,0, # 00 - 0f
1,1,1,1,1,1,1,1, 1,1,1,0,1,1,1,1, # 10 - 1f
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 20 - 2f
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 30 - 3f
1,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4, # 40 - 4f
4,4,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 50 - 5f
1,5,5,5,5,5,5,5, 5,5,5,5,5,5,5,5, # 60 - 6f
5,5,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 70 - 7f
0,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 80 - 8f
6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 90 - 9f
6,7,7,7,7,7,7,7, 7,7,7,7,7,8,8,8, # a0 - af
7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7, # b0 - bf
7,7,7,7,7,7,9,2, 2,3,2,2,2,2,2,2, # c0 - cf
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # d0 - df
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # e0 - ef
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,0, # f0 - ff
)
CP949_ST = (
#cls= 0 1 2 3 4 5 6 7 8 9 # previous state =
MachineState.ERROR,MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START, 4, 5,MachineState.ERROR, 6, # MachineState.START
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, # MachineState.ERROR
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME, # MachineState.ITS_ME
MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START, # 3
MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, # 4
MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, # 5
MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START, # 6
)
CP949_CHAR_LEN_TABLE = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2)
CP949_SM_MODEL = {'class_table': CP949_CLS,
'class_factor': 10,
'state_table': CP949_ST,
'char_len_table': CP949_CHAR_LEN_TABLE,
'name': 'CP949'}
# EUC-JP
EUCJP_CLS = (
4,4,4,4,4,4,4,4, # 00 - 07
4,4,4,4,4,4,5,5, # 08 - 0f
4,4,4,4,4,4,4,4, # 10 - 17
4,4,4,5,4,4,4,4, # 18 - 1f
4,4,4,4,4,4,4,4, # 20 - 27
4,4,4,4,4,4,4,4, # 28 - 2f
4,4,4,4,4,4,4,4, # 30 - 37
4,4,4,4,4,4,4,4, # 38 - 3f
4,4,4,4,4,4,4,4, # 40 - 47
4,4,4,4,4,4,4,4, # 48 - 4f
4,4,4,4,4,4,4,4, # 50 - 57
4,4,4,4,4,4,4,4, # 58 - 5f
4,4,4,4,4,4,4,4, # 60 - 67
4,4,4,4,4,4,4,4, # 68 - 6f
4,4,4,4,4,4,4,4, # 70 - 77
4,4,4,4,4,4,4,4, # 78 - 7f
5,5,5,5,5,5,5,5, # 80 - 87
5,5,5,5,5,5,1,3, # 88 - 8f
5,5,5,5,5,5,5,5, # 90 - 97
5,5,5,5,5,5,5,5, # 98 - 9f
5,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,0,5 # f8 - ff
)
EUCJP_ST = (
3, 4, 3, 5,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#10-17
MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 3,MachineState.ERROR,#18-1f
3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START#20-27
)
EUCJP_CHAR_LEN_TABLE = (2, 2, 2, 3, 1, 0)
EUCJP_SM_MODEL = {'class_table': EUCJP_CLS,
'class_factor': 6,
'state_table': EUCJP_ST,
'char_len_table': EUCJP_CHAR_LEN_TABLE,
'name': 'EUC-JP'}
# EUC-KR
EUCKR_CLS = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,3,3,3, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,3,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,0 # f8 - ff
)
EUCKR_ST = (
MachineState.ERROR,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #08-0f
)
EUCKR_CHAR_LEN_TABLE = (0, 1, 2, 0)
EUCKR_SM_MODEL = {'class_table': EUCKR_CLS,
'class_factor': 4,
'state_table': EUCKR_ST,
'char_len_table': EUCKR_CHAR_LEN_TABLE,
'name': 'EUC-KR'}
# EUC-TW
EUCTW_CLS = (
2,2,2,2,2,2,2,2, # 00 - 07
2,2,2,2,2,2,0,0, # 08 - 0f
2,2,2,2,2,2,2,2, # 10 - 17
2,2,2,0,2,2,2,2, # 18 - 1f
2,2,2,2,2,2,2,2, # 20 - 27
2,2,2,2,2,2,2,2, # 28 - 2f
2,2,2,2,2,2,2,2, # 30 - 37
2,2,2,2,2,2,2,2, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,2, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,6,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,3,4,4,4,4,4,4, # a0 - a7
5,5,1,1,1,1,1,1, # a8 - af
1,1,1,1,1,1,1,1, # b0 - b7
1,1,1,1,1,1,1,1, # b8 - bf
1,1,3,1,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0 # f8 - ff
)
EUCTW_ST = (
MachineState.ERROR,MachineState.ERROR,MachineState.START, 3, 3, 3, 4,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,MachineState.ERROR,#10-17
MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f
5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,#20-27
MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f
)
EUCTW_CHAR_LEN_TABLE = (0, 0, 1, 2, 2, 2, 3)
EUCTW_SM_MODEL = {'class_table': EUCTW_CLS,
'class_factor': 7,
'state_table': EUCTW_ST,
'char_len_table': EUCTW_CHAR_LEN_TABLE,
'name': 'x-euc-tw'}
# GB2312
GB2312_CLS = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
3,3,3,3,3,3,3,3, # 30 - 37
3,3,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,4, # 78 - 7f
5,6,6,6,6,6,6,6, # 80 - 87
6,6,6,6,6,6,6,6, # 88 - 8f
6,6,6,6,6,6,6,6, # 90 - 97
6,6,6,6,6,6,6,6, # 98 - 9f
6,6,6,6,6,6,6,6, # a0 - a7
6,6,6,6,6,6,6,6, # a8 - af
6,6,6,6,6,6,6,6, # b0 - b7
6,6,6,6,6,6,6,6, # b8 - bf
6,6,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
6,6,6,6,6,6,6,6, # e0 - e7
6,6,6,6,6,6,6,6, # e8 - ef
6,6,6,6,6,6,6,6, # f0 - f7
6,6,6,6,6,6,6,0 # f8 - ff
)
GB2312_ST = (
MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, 3,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,#10-17
4,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f
MachineState.ERROR,MachineState.ERROR, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#20-27
MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f
)
# To be accurate, the length of class 6 can be either 2 or 4.
# But it is not necessary to discriminate between the two since
# it is used for frequency analysis only, and we are validating
# each code range there as well. So it is safe to set it to be
# 2 here.
GB2312_CHAR_LEN_TABLE = (0, 1, 1, 1, 1, 1, 2)
GB2312_SM_MODEL = {'class_table': GB2312_CLS,
'class_factor': 7,
'state_table': GB2312_ST,
'char_len_table': GB2312_CHAR_LEN_TABLE,
'name': 'GB2312'}
# Shift_JIS
SJIS_CLS = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
3,3,3,3,3,2,2,3, # 80 - 87
3,3,3,3,3,3,3,3, # 88 - 8f
3,3,3,3,3,3,3,3, # 90 - 97
3,3,3,3,3,3,3,3, # 98 - 9f
    # 0xa0 is illegal in Shift_JIS encoding, but some pages do
    # contain such bytes, so we need to be more forgiving of errors.
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,4,4,4, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,0,0,0) # f8 - ff
SJIS_ST = (
MachineState.ERROR,MachineState.START,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START #10-17
)
SJIS_CHAR_LEN_TABLE = (0, 1, 1, 2, 0, 0)
SJIS_SM_MODEL = {'class_table': SJIS_CLS,
'class_factor': 6,
'state_table': SJIS_ST,
'char_len_table': SJIS_CHAR_LEN_TABLE,
'name': 'Shift_JIS'}
# UCS2-BE
UCS2BE_CLS = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2BE_ST = (
5, 7, 7,MachineState.ERROR, 4, 3,MachineState.ERROR,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
MachineState.ITS_ME,MachineState.ITS_ME, 6, 6, 6, 6,MachineState.ERROR,MachineState.ERROR,#10-17
6, 6, 6, 6, 6,MachineState.ITS_ME, 6, 6,#18-1f
6, 6, 6, 6, 5, 7, 7,MachineState.ERROR,#20-27
5, 8, 6, 6,MachineState.ERROR, 6, 6, 6,#28-2f
6, 6, 6, 6,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #30-37
)
UCS2BE_CHAR_LEN_TABLE = (2, 2, 2, 0, 2, 2)
UCS2BE_SM_MODEL = {'class_table': UCS2BE_CLS,
'class_factor': 6,
'state_table': UCS2BE_ST,
'char_len_table': UCS2BE_CHAR_LEN_TABLE,
'name': 'UTF-16BE'}
# UCS2-LE
UCS2LE_CLS = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2LE_ST = (
6, 6, 7, 6, 4, 3,MachineState.ERROR,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
MachineState.ITS_ME,MachineState.ITS_ME, 5, 5, 5,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#10-17
5, 5, 5,MachineState.ERROR, 5,MachineState.ERROR, 6, 6,#18-1f
7, 6, 8, 8, 5, 5, 5,MachineState.ERROR,#20-27
5, 5, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5, 5,#28-2f
5, 5, 5,MachineState.ERROR, 5,MachineState.ERROR,MachineState.START,MachineState.START #30-37
)
UCS2LE_CHAR_LEN_TABLE = (2, 2, 2, 2, 2, 2)
UCS2LE_SM_MODEL = {'class_table': UCS2LE_CLS,
'class_factor': 6,
'state_table': UCS2LE_ST,
'char_len_table': UCS2LE_CHAR_LEN_TABLE,
'name': 'UTF-16LE'}
# UTF-8
UTF8_CLS = (
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as a legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
2,2,2,2,3,3,3,3, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
5,5,5,5,5,5,5,5, # a0 - a7
5,5,5,5,5,5,5,5, # a8 - af
5,5,5,5,5,5,5,5, # b0 - b7
5,5,5,5,5,5,5,5, # b8 - bf
0,0,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
7,8,8,8,8,8,8,8, # e0 - e7
8,8,8,8,8,9,8,8, # e8 - ef
10,11,11,11,11,11,11,11, # f0 - f7
12,13,13,13,14,15,0,0 # f8 - ff
)
UTF8_ST = (
MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 12, 10,#00-07
9, 11, 8, 7, 6, 5, 4, 3,#08-0f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#10-17
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#20-27
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#28-2f
MachineState.ERROR,MachineState.ERROR, 5, 5, 5, 5,MachineState.ERROR,MachineState.ERROR,#30-37
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#38-3f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5, 5, 5,MachineState.ERROR,MachineState.ERROR,#40-47
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#48-4f
MachineState.ERROR,MachineState.ERROR, 7, 7, 7, 7,MachineState.ERROR,MachineState.ERROR,#50-57
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#58-5f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 7, 7,MachineState.ERROR,MachineState.ERROR,#60-67
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#68-6f
MachineState.ERROR,MachineState.ERROR, 9, 9, 9, 9,MachineState.ERROR,MachineState.ERROR,#70-77
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#78-7f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 9,MachineState.ERROR,MachineState.ERROR,#80-87
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#88-8f
MachineState.ERROR,MachineState.ERROR, 12, 12, 12, 12,MachineState.ERROR,MachineState.ERROR,#90-97
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#98-9f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 12,MachineState.ERROR,MachineState.ERROR,#a0-a7
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#a8-af
MachineState.ERROR,MachineState.ERROR, 12, 12, 12,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#b0-b7
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#b8-bf
MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,#c0-c7
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR #c8-cf
)
UTF8_CHAR_LEN_TABLE = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6)
UTF8_SM_MODEL = {'class_table': UTF8_CLS,
'class_factor': 16,
'state_table': UTF8_ST,
'char_len_table': UTF8_CHAR_LEN_TABLE,
'name': 'UTF-8'}
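# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of how one of the SM_MODEL dicts above is consumed,
# assuming the sibling codingstatemachine module from the same package;
# _demo_is_valid_utf8 is a hypothetical helper, not part of chardet's API.
def _demo_is_valid_utf8(data):
    from .codingstatemachine import CodingStateMachine  # deferred import
    sm = CodingStateMachine(UTF8_SM_MODEL)
    for byte in bytearray(data):
        state = sm.next_state(byte)
        if state == MachineState.ERROR:
            return False  # the byte sequence can never be valid UTF-8
        if state == MachineState.ITS_ME:
            return True   # positively identified as UTF-8
    return True           # no violations seen so far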
|
apache-2.0
|
bbenko/shinkicker
|
django/contrib/localflavor/in_/forms.py
|
309
|
1741
|
"""
India-specific Form helpers.
"""
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select
from django.utils.encoding import smart_unicode
from django.utils.translation import gettext
import re
class INZipCodeField(RegexField):
default_error_messages = {
        'invalid': gettext(u'Enter a zip code in the format XXXXXX.'),
}
def __init__(self, *args, **kwargs):
super(INZipCodeField, self).__init__(r'^\d{6}$',
max_length=None, min_length=None, *args, **kwargs)
class INStateField(Field):
"""
    A form field that validates its input is an Indian state name or
    abbreviation. It normalizes the input to the standard two-letter vehicle
    registration abbreviation for the given state or union territory.
"""
default_error_messages = {
        'invalid': u'Enter an Indian state or territory.',
}
def clean(self, value):
from in_states import STATES_NORMALIZED
super(INStateField, self).clean(value)
if value in EMPTY_VALUES:
return u''
try:
value = value.strip().lower()
except AttributeError:
pass
else:
try:
                return smart_unicode(STATES_NORMALIZED[value])
except KeyError:
pass
raise ValidationError(self.error_messages['invalid'])
class INStateSelect(Select):
"""
A Select widget that uses a list of Indian states/territories as its
choices.
"""
def __init__(self, attrs=None):
from in_states import STATE_CHOICES
super(INStateSelect, self).__init__(attrs, choices=STATE_CHOICES)
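# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming a standard Django form; AddressForm is a
# hypothetical name invented for this example.
def _demo_address_form():
    from django import forms

    class AddressForm(forms.Form):
        pin = INZipCodeField()  # accepts exactly six digits, e.g. u'110001'
        state = INStateField(widget=INStateSelect())

    # INStateField.clean() normalizes e.g. u'delhi' to the code u'DL'.
    return AddressForm({'pin': u'110001', 'state': u'delhi'})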
|
bsd-3-clause
|
michaelwu/servo
|
components/script/dom/bindings/codegen/Configuration.py
|
6
|
13290
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from WebIDL import IDLInterface
autogenerated_comment = "/* THIS FILE IS AUTOGENERATED - DO NOT EDIT */\n"
class Configuration:
"""
Represents global configuration state based on IDL parse data and
the configuration file.
"""
def __init__(self, filename, parseData):
# Read the configuration file.
glbl = {}
execfile(filename, glbl)
config = glbl['DOMInterfaces']
# Build descriptors for all the interfaces we have in the parse data.
# This allows callers to specify a subset of interfaces by filtering
# |parseData|.
self.descriptors = []
self.interfaces = {}
        self.maxProtoChainLength = 0
for thing in parseData:
# Some toplevel things are sadly types, and those have an
# isInterface that doesn't mean the same thing as IDLObject's
# isInterface()...
if not isinstance(thing, IDLInterface):
continue
iface = thing
self.interfaces[iface.identifier.name] = iface
if iface.identifier.name not in config:
# Completely skip consequential interfaces with no descriptor
# if they have no interface object because chances are we
# don't need to do anything interesting with them.
if iface.isConsequential() and not iface.hasInterfaceObject():
continue
entry = {}
else:
entry = config[iface.identifier.name]
if not isinstance(entry, list):
assert isinstance(entry, dict)
entry = [entry]
self.descriptors.extend([Descriptor(self, iface, x) for x in entry])
# Mark the descriptors for which only a single nativeType implements
# an interface.
for descriptor in self.descriptors:
            interfaceName = descriptor.interface.identifier.name
            otherDescriptors = [d for d in self.descriptors
                                if d.interface.identifier.name == interfaceName]
descriptor.uniqueImplementation = len(otherDescriptors) == 1
self.enums = [e for e in parseData if e.isEnum()]
self.dictionaries = [d for d in parseData if d.isDictionary()]
self.callbacks = [c for c in parseData if
c.isCallback() and not c.isInterface()]
# Keep the descriptor list sorted for determinism.
self.descriptors.sort(lambda x,y: cmp(x.name, y.name))
def getInterface(self, ifname):
return self.interfaces[ifname]
def getDescriptors(self, **filters):
"""Gets the descriptors that match the given filters."""
curr = self.descriptors
for key, val in filters.iteritems():
if key == 'webIDLFile':
getter = lambda x: x.interface.filename()
elif key == 'hasInterfaceObject':
getter = lambda x: x.interface.hasInterfaceObject()
elif key == 'isCallback':
getter = lambda x: x.interface.isCallback()
elif key == 'isJSImplemented':
getter = lambda x: x.interface.isJSImplemented()
else:
getter = lambda x: getattr(x, key)
curr = filter(lambda x: getter(x) == val, curr)
return curr
def getEnums(self, webIDLFile):
return filter(lambda e: e.filename() == webIDLFile, self.enums)
@staticmethod
def _filterForFile(items, webIDLFile=""):
"""Gets the items that match the given filters."""
if not webIDLFile:
return items
return filter(lambda x: x.filename() == webIDLFile, items)
def getDictionaries(self, webIDLFile=""):
return self._filterForFile(self.dictionaries, webIDLFile=webIDLFile)
def getCallbacks(self, webIDLFile=""):
return self._filterForFile(self.callbacks, webIDLFile=webIDLFile)
def getDescriptor(self, interfaceName):
"""
Gets the appropriate descriptor for the given interface name.
"""
iface = self.getInterface(interfaceName)
descriptors = self.getDescriptors(interface=iface)
# We should have exactly one result.
        if len(descriptors) != 1:
            raise NoSuchDescriptorError("For " + interfaceName + " found " +
                                        str(len(descriptors)) + " matches")
return descriptors[0]
def getDescriptorProvider(self):
"""
Gets a descriptor provider that can provide descriptors as needed.
"""
return DescriptorProvider(self)
class NoSuchDescriptorError(TypeError):
def __init__(self, str):
TypeError.__init__(self, str)
class DescriptorProvider:
"""
A way of getting descriptors for interface names
"""
def __init__(self, config):
self.config = config
def getDescriptor(self, interfaceName):
"""
Gets the appropriate descriptor for the given interface name given the
context of the current descriptor.
"""
return self.config.getDescriptor(interfaceName)
class Descriptor(DescriptorProvider):
"""
Represents a single descriptor for an interface. See Bindings.conf.
"""
def __init__(self, config, interface, desc):
DescriptorProvider.__init__(self, config)
self.interface = interface
# Read the desc, and fill in the relevant defaults.
ifaceName = self.interface.identifier.name
# Callback types do not use JS smart pointers, so we should not use the
# built-in rooting mechanisms for them.
if self.interface.isCallback():
self.needsRooting = False
ty = "%sBinding::%s" % (ifaceName, ifaceName)
self.returnType = ty
self.argumentType = "???"
self.memberType = "???"
self.nativeType = ty
else:
self.needsRooting = True
self.returnType = "Temporary<%s>" % ifaceName
self.argumentType = "JSRef<%s>" % ifaceName
self.memberType = "Root<'a, 'b, %s>" % ifaceName
self.nativeType = "JS<%s>" % ifaceName
self.concreteType = ifaceName
self.register = desc.get('register', True)
self.outerObjectHook = desc.get('outerObjectHook', 'None')
# If we're concrete, we need to crawl our ancestor interfaces and mark
# them as having a concrete descendant.
self.concrete = desc.get('concrete', True)
if self.concrete:
self.proxy = False
operations = {
'IndexedGetter': None,
'IndexedSetter': None,
'IndexedCreator': None,
'IndexedDeleter': None,
'NamedGetter': None,
'NamedSetter': None,
'NamedCreator': None,
'NamedDeleter': None,
'Stringifier': None
}
iface = self.interface
while iface:
for m in iface.members:
if not m.isMethod():
continue
def addOperation(operation, m):
if not operations[operation]:
operations[operation] = m
def addIndexedOrNamedOperation(operation, m):
self.proxy = True
if m.isIndexed():
operation = 'Indexed' + operation
else:
assert m.isNamed()
operation = 'Named' + operation
addOperation(operation, m)
if m.isStringifier():
addOperation('Stringifier', m)
else:
if m.isGetter():
addIndexedOrNamedOperation('Getter', m)
if m.isSetter():
addIndexedOrNamedOperation('Setter', m)
if m.isCreator():
addIndexedOrNamedOperation('Creator', m)
if m.isDeleter():
addIndexedOrNamedOperation('Deleter', m)
iface.setUserData('hasConcreteDescendant', True)
iface = iface.parent
if self.proxy:
self.operations = operations
iface = self.interface
while iface:
iface.setUserData('hasProxyDescendant', True)
iface = iface.parent
self.name = interface.identifier.name
# self.extendedAttributes is a dict of dicts, keyed on
# all/getterOnly/setterOnly and then on member name. Values are an
# array of extended attributes.
self.extendedAttributes = { 'all': {}, 'getterOnly': {}, 'setterOnly': {} }
def addExtendedAttribute(attribute, config):
def add(key, members, attribute):
for member in members:
self.extendedAttributes[key].setdefault(member, []).append(attribute)
if isinstance(config, dict):
for key in ['all', 'getterOnly', 'setterOnly']:
add(key, config.get(key, []), attribute)
elif isinstance(config, list):
add('all', config, attribute)
else:
assert isinstance(config, str)
if config == '*':
iface = self.interface
while iface:
add('all', map(lambda m: m.name, iface.members), attribute)
iface = iface.parent
else:
add('all', [config], attribute)
# Build the prototype chain.
self.prototypeChain = []
parent = interface
while parent:
self.prototypeChain.insert(0, parent.identifier.name)
parent = parent.parent
config.maxProtoChainLength = max(config.maxProtoChainLength,
len(self.prototypeChain))
def getExtendedAttributes(self, member, getter=False, setter=False):
def maybeAppendInfallibleToAttrs(attrs, throws):
if throws is None:
attrs.append("infallible")
elif throws is True:
pass
else:
raise TypeError("Unknown value for 'Throws'")
name = member.identifier.name
if member.isMethod():
attrs = self.extendedAttributes['all'].get(name, [])
throws = member.getExtendedAttribute("Throws")
maybeAppendInfallibleToAttrs(attrs, throws)
return attrs
assert member.isAttr()
assert bool(getter) != bool(setter)
key = 'getterOnly' if getter else 'setterOnly'
attrs = self.extendedAttributes['all'].get(name, []) + self.extendedAttributes[key].get(name, [])
throws = member.getExtendedAttribute("Throws")
if throws is None:
throwsAttr = "GetterThrows" if getter else "SetterThrows"
throws = member.getExtendedAttribute(throwsAttr)
maybeAppendInfallibleToAttrs(attrs, throws)
return attrs
def isGlobal(self):
"""
Returns true if this is the primary interface for a global object
of some sort.
"""
return (self.interface.getExtendedAttribute("Global") or
self.interface.getExtendedAttribute("PrimaryGlobal"))
# Some utility methods
def getTypesFromDescriptor(descriptor):
"""
Get all argument and return types for all members of the descriptor
"""
members = [m for m in descriptor.interface.members]
if descriptor.interface.ctor():
members.append(descriptor.interface.ctor())
members.extend(descriptor.interface.namedConstructors)
signatures = [s for m in members if m.isMethod() for s in m.signatures()]
types = []
for s in signatures:
assert len(s) == 2
(returnType, arguments) = s
types.append(returnType)
types.extend(a.type for a in arguments)
types.extend(a.type for a in members if a.isAttr())
return types
def getFlatTypes(types):
retval = set()
for type in types:
type = type.unroll()
if type.isUnion():
retval |= set(type.flatMemberTypes)
else:
retval.add(type)
return retval
def getTypesFromDictionary(dictionary):
"""
Get all member types for this dictionary
"""
types = []
curDict = dictionary
while curDict:
types.extend([m.type for m in curDict.members])
curDict = curDict.parent
return types
def getTypesFromCallback(callback):
"""
Get the types this callback depends on: its return type and the
types of its arguments.
"""
sig = callback.signatures()[0]
types = [sig[0]] # Return type
types.extend(arg.type for arg in sig[1]) # Arguments
return types
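# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of how a code generator might query the configuration;
# 'Bindings.conf' and parse_data are hypothetical placeholders.
#
# config = Configuration('Bindings.conf', parse_data)
# for desc in config.getDescriptors(isCallback=False, register=True):
#     print desc.name, desc.nativeType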
|
mpl-2.0
|
Turlough/keyczar
|
cpp/src/tools/swtoolkit/site_scons/site_tools/collada_dom.py
|
12
|
1830
|
#!/usr/bin/python2.4
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Collada DOM 1.3.0 tool for SCons."""
def generate(env):
# NOTE: SCons requires the use of this name, which fails gpylint.
"""SCons entry point for this tool."""
env.Append(CCFLAGS=[
'-I$COLLADA_DIR/include',
'-I$COLLADA_DIR/include/1.4',
])
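# --- Illustrative usage (not part of the original tool) ---
# A minimal SConstruct sketch, assuming this tool is on SCons' toolpath;
# the COLLADA_DIR value is a placeholder.
#
# env = Environment(tools=['collada_dom'], COLLADA_DIR='third_party/collada')
# env.Program('viewer', ['viewer.cc'])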
|
apache-2.0
|
mateoqac/unqTip
|
lib_gb/six.py
|
878
|
29664
|
"""Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <[email protected]>"
__version__ = "1.9.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
try:
# This is a bit ugly, but it avoids running this again by
# removing this descriptor.
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
    with Python 2.5 and all existing versions of Python 3.
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
        Return true if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
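# Editor's sketch (not in the original module): registering and retiring a
# custom move. MovedModule is defined earlier in this file; "mock" is only an
# illustrative name (on Python 2 it would need the third-party mock package).
if __name__ == "__main__":
    add_move(MovedModule("mock", "mock", "unittest.mock"))
    print(moves.mock)      # lazily imports mock / unittest.mock
    remove_move("mock")    # falls back to moves.__dict__ once resolved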
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return iter(d.iterkeys(**kw))
def itervalues(d, **kw):
return iter(d.itervalues(**kw))
def iteritems(d, **kw):
return iter(d.iteritems(**kw))
def iterlists(d, **kw):
return iter(d.iterlists(**kw))
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
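# Editor's sketch: the byte/text shims defined above give the same results
# under both major versions.
if __name__ == "__main__":
    assert int2byte(65) == b("A")
    assert byte2int(b("A")) == 65
    assert list(iterbytes(b("AB"))) == [65, 66]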
def assertCountEqual(self, *args, **kwargs):
return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
if from_value is None:
raise value
raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
raise value from from_value
""")
else:
def raise_from(value, from_value):
raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
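# Editor's sketch: reraise() propagates an exception with its original
# traceback on both versions; the ZeroDivisionError below is only a demo.
if __name__ == "__main__":
    try:
        1 / 0
    except ZeroDivisionError:
        tp, value, tb = sys.exc_info()
        try:
            reraise(tp, value, tb)
        except ZeroDivisionError:
            pass  # re-raised with the original traceback attached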
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped, assigned, updated)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
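# Editor's sketch: the temporary class above never appears in the final MRO;
# Meta and Base below are illustrative names.
if __name__ == "__main__":
    class Meta(type):
        pass
    class Base(object):
        pass
    class MyClass(with_metaclass(Meta, Base)):
        pass
    assert type(MyClass) is Meta and issubclass(MyClass, Base)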
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
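# Editor's sketch: the decorator form re-creates the class under the given
# metaclass; Meta2 below is an illustrative name.
if __name__ == "__main__":
    class Meta2(type):
        pass
    @add_metaclass(Meta2)
    class Decorated(object):
        pass
    assert type(Decorated) is Meta2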
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
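# Editor's sketch: on Python 2 the decorated class gains a UTF-8-encoding
# __str__ delegating to __unicode__; on Python 3 it is returned unchanged.
if __name__ == "__main__":
    @python_2_unicode_compatible
    class Greeting(object):
        def __str__(self):
            return u"h\u00e9llo"
    print(str(Greeting()))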
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
|
gpl-3.0
|
wakatime/wakatime
|
wakatime/packages/py26/pygments/lexers/configs.py
|
25
|
28266
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.configs
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for configuration file formats.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, default, words, bygroups, include, using
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace, Literal
from pygments.lexers.shell import BashLexer
__all__ = ['IniLexer', 'RegeditLexer', 'PropertiesLexer', 'KconfigLexer',
'Cfengine3Lexer', 'ApacheConfLexer', 'SquidConfLexer',
'NginxConfLexer', 'LighttpdConfLexer', 'DockerLexer',
'TerraformLexer', 'TermcapLexer', 'TerminfoLexer',
'PkgConfigLexer', 'PacmanConfLexer']
class IniLexer(RegexLexer):
"""
Lexer for configuration files in INI style.
"""
name = 'INI'
aliases = ['ini', 'cfg', 'dosini']
filenames = ['*.ini', '*.cfg', '*.inf']
mimetypes = ['text/x-ini', 'text/inf']
tokens = {
'root': [
(r'\s+', Text),
(r'[;#].*', Comment.Single),
(r'\[.*?\]$', Keyword),
(r'(.*?)([ \t]*)(=)([ \t]*)(.*(?:\n[ \t].+)*)',
bygroups(Name.Attribute, Text, Operator, Text, String)),
# standalone option, supported by some INI parsers
(r'(.+?)$', Name.Attribute),
],
}
def analyse_text(text):
npos = text.find('\n')
if npos < 3:
return False
return text[0] == '[' and text[npos-1] == ']'
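# Editor's sketch (not part of the original module): feeding a small INI
# snippet through the lexer to show the resulting token stream.
if __name__ == "__main__":
    for token, value in IniLexer().get_tokens("[core]\nname = value\n"):
        print(token, repr(value))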
class RegeditLexer(RegexLexer):
"""
Lexer for `Windows Registry
<http://en.wikipedia.org/wiki/Windows_Registry#.REG_files>`_ files produced
by regedit.
.. versionadded:: 1.6
"""
name = 'reg'
aliases = ['registry']
filenames = ['*.reg']
mimetypes = ['text/x-windows-registry']
tokens = {
'root': [
(r'Windows Registry Editor.*', Text),
(r'\s+', Text),
(r'[;#].*', Comment.Single),
(r'(\[)(-?)(HKEY_[A-Z_]+)(.*?\])$',
bygroups(Keyword, Operator, Name.Builtin, Keyword)),
# String keys, which obey somewhat normal escaping
(r'("(?:\\"|\\\\|[^"])+")([ \t]*)(=)([ \t]*)',
bygroups(Name.Attribute, Text, Operator, Text),
'value'),
# Bare keys (includes @)
(r'(.*?)([ \t]*)(=)([ \t]*)',
bygroups(Name.Attribute, Text, Operator, Text),
'value'),
],
'value': [
(r'-', Operator, '#pop'), # delete value
(r'(dword|hex(?:\([0-9a-fA-F]\))?)(:)([0-9a-fA-F,]+)',
bygroups(Name.Variable, Punctuation, Number), '#pop'),
# As far as I know, .reg files do not support line continuation.
(r'.+', String, '#pop'),
default('#pop'),
]
}
def analyse_text(text):
return text.startswith('Windows Registry Editor')
class PropertiesLexer(RegexLexer):
"""
Lexer for configuration files in Java's properties format.
Note: trailing whitespace counts as part of the value, as per the spec
.. versionadded:: 1.4
"""
name = 'Properties'
aliases = ['properties', 'jproperties']
filenames = ['*.properties']
mimetypes = ['text/x-java-properties']
tokens = {
'root': [
(r'^(\w+)([ \t])(\w+\s*)$', bygroups(Name.Attribute, Text, String)),
(r'^\w+(\\[ \t]\w*)*$', Name.Attribute),
(r'(^ *)([#!].*)', bygroups(Text, Comment)),
# More controversial comments
(r'(^ *)((?:;|//).*)', bygroups(Text, Comment)),
(r'(.*?)([ \t]*)([=:])([ \t]*)(.*(?:(?<=\\)\n.*)*)',
bygroups(Name.Attribute, Text, Operator, Text, String)),
(r'\s', Text),
],
}
def _rx_indent(level):
# Kconfig *always* interprets a tab as 8 spaces, so this is the default.
# Edit this if you are in an environment where KconfigLexer gets expanded
# input (tabs expanded to spaces) and the expansion tab width is != 8,
# e.g. in connection with Trac (trac.ini, [mimeviewer], tab_width).
# Value range here is 2 <= {tab_width} <= 8.
tab_width = 8
# Regex matching a given indentation {level}, assuming that indentation is
# a multiple of {tab_width}. In other cases there might be problems.
if tab_width == 2:
space_repeat = '+'
else:
space_repeat = '{1,%d}' % (tab_width - 1)
if level == 1:
level_repeat = ''
else:
level_repeat = '{%s}' % level
return r'(?:\t| %s\t| {%s})%s.*\n' % (space_repeat, tab_width, level_repeat)
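# Editor's note: with the default tab_width of 8, _rx_indent(2) evaluates to
# r'(?:\t| {1,7}\t| {8}){2}.*\n', i.e. two indentation steps followed by the
# rest of the line.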
class KconfigLexer(RegexLexer):
"""
For Linux-style Kconfig files.
.. versionadded:: 1.6
"""
name = 'Kconfig'
aliases = ['kconfig', 'menuconfig', 'linux-config', 'kernel-config']
# Adjust this if new kconfig file names appear in your environment
filenames = ['Kconfig', '*Config.in*', 'external.in*',
'standard-modules.in']
mimetypes = ['text/x-kconfig']
# No re.MULTILINE, indentation-aware help text needs line-by-line handling
flags = 0
def call_indent(level):
# If indentation >= {level} is detected, enter state 'indent{level}'
return (_rx_indent(level), String.Doc, 'indent%s' % level)
def do_indent(level):
# Print paragraphs of indentation level >= {level} as String.Doc,
# ignoring blank lines. Then return to 'root' state.
return [
(_rx_indent(level), String.Doc),
(r'\s*\n', Text),
default('#pop:2')
]
tokens = {
'root': [
(r'\s+', Text),
(r'#.*?\n', Comment.Single),
(words((
'mainmenu', 'config', 'menuconfig', 'choice', 'endchoice',
'comment', 'menu', 'endmenu', 'visible if', 'if', 'endif',
'source', 'prompt', 'select', 'depends on', 'default',
'range', 'option'), suffix=r'\b'),
Keyword),
(r'(---help---|help)[\t ]*\n', Keyword, 'help'),
(r'(bool|tristate|string|hex|int|defconfig_list|modules|env)\b',
Name.Builtin),
(r'[!=&|]', Operator),
(r'[()]', Punctuation),
(r'[0-9]+', Number.Integer),
(r"'(''|[^'])*'", String.Single),
(r'"(""|[^"])*"', String.Double),
(r'\S+', Text),
],
# Help text is indented, multi-line and ends when a lower indentation
# level is detected.
'help': [
# Skip blank lines after help token, if any
(r'\s*\n', Text),
# Determine the first help line's indentation level heuristically(!).
# Attention: this is not perfect, but works for 99% of "normal"
# indentation schemes up to a max. indentation level of 7.
call_indent(7),
call_indent(6),
call_indent(5),
call_indent(4),
call_indent(3),
call_indent(2),
call_indent(1),
default('#pop'), # for incomplete help sections without text
],
# Handle text for indentation levels 7 to 1
'indent7': do_indent(7),
'indent6': do_indent(6),
'indent5': do_indent(5),
'indent4': do_indent(4),
'indent3': do_indent(3),
'indent2': do_indent(2),
'indent1': do_indent(1),
}
class Cfengine3Lexer(RegexLexer):
"""
Lexer for `CFEngine3 <http://cfengine.org>`_ policy files.
.. versionadded:: 1.5
"""
name = 'CFEngine3'
aliases = ['cfengine3', 'cf3']
filenames = ['*.cf']
mimetypes = []
tokens = {
'root': [
(r'#.*?\n', Comment),
(r'(body)(\s+)(\S+)(\s+)(control)',
bygroups(Keyword, Text, Keyword, Text, Keyword)),
(r'(body|bundle)(\s+)(\S+)(\s+)(\w+)(\()',
bygroups(Keyword, Text, Keyword, Text, Name.Function, Punctuation),
'arglist'),
(r'(body|bundle)(\s+)(\S+)(\s+)(\w+)',
bygroups(Keyword, Text, Keyword, Text, Name.Function)),
(r'(")([^"]+)(")(\s+)(string|slist|int|real)(\s*)(=>)(\s*)',
bygroups(Punctuation, Name.Variable, Punctuation,
Text, Keyword.Type, Text, Operator, Text)),
(r'(\S+)(\s*)(=>)(\s*)',
bygroups(Keyword.Reserved, Text, Operator, Text)),
(r'"', String, 'string'),
(r'(\w+)(\()', bygroups(Name.Function, Punctuation)),
(r'([\w.!&|()]+)(::)', bygroups(Name.Class, Punctuation)),
(r'(\w+)(:)', bygroups(Keyword.Declaration, Punctuation)),
(r'@[{(][^)}]+[})]', Name.Variable),
(r'[(){},;]', Punctuation),
(r'=>', Operator),
(r'->', Operator),
(r'\d+\.\d+', Number.Float),
(r'\d+', Number.Integer),
(r'\w+', Name.Function),
(r'\s+', Text),
],
'string': [
(r'\$[{(]', String.Interpol, 'interpol'),
(r'\\.', String.Escape),
(r'"', String, '#pop'),
(r'\n', String),
(r'.', String),
],
'interpol': [
(r'\$[{(]', String.Interpol, '#push'),
(r'[})]', String.Interpol, '#pop'),
(r'[^${()}]+', String.Interpol),
],
'arglist': [
(r'\)', Punctuation, '#pop'),
(r',', Punctuation),
(r'\w+', Name.Variable),
(r'\s+', Text),
],
}
class ApacheConfLexer(RegexLexer):
"""
Lexer for configuration files following the Apache config file
format.
.. versionadded:: 0.6
"""
name = 'ApacheConf'
aliases = ['apacheconf', 'aconf', 'apache']
filenames = ['.htaccess', 'apache.conf', 'apache2.conf']
mimetypes = ['text/x-apacheconf']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'(#.*?)$', Comment),
(r'(<[^\s>]+)(?:(\s+)(.*?))?(>)',
bygroups(Name.Tag, Text, String, Name.Tag)),
(r'([a-z]\w*)(\s+)',
bygroups(Name.Builtin, Text), 'value'),
(r'\.+', Text),
],
'value': [
(r'\\\n', Text),
(r'$', Text, '#pop'),
(r'\\', Text),
(r'[^\S\n]+', Text),
(r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
(r'\d+', Number),
(r'/([a-z0-9][\w./-]+)', String.Other),
(r'(on|off|none|any|all|double|email|dns|min|minimal|'
r'os|productonly|full|emerg|alert|crit|error|warn|'
r'notice|info|debug|registry|script|inetd|standalone|'
r'user|group)\b', Keyword),
(r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),
(r'[^\s"\\]+', Text)
],
}
class SquidConfLexer(RegexLexer):
"""
Lexer for `squid <http://www.squid-cache.org/>`_ configuration files.
.. versionadded:: 0.9
"""
name = 'SquidConf'
aliases = ['squidconf', 'squid.conf', 'squid']
filenames = ['squid.conf']
mimetypes = ['text/x-squidconf']
flags = re.IGNORECASE
keywords = (
"access_log", "acl", "always_direct", "announce_host",
"announce_period", "announce_port", "announce_to", "anonymize_headers",
"append_domain", "as_whois_server", "auth_param_basic",
"authenticate_children", "authenticate_program", "authenticate_ttl",
"broken_posts", "buffered_logs", "cache_access_log", "cache_announce",
"cache_dir", "cache_dns_program", "cache_effective_group",
"cache_effective_user", "cache_host", "cache_host_acl",
"cache_host_domain", "cache_log", "cache_mem", "cache_mem_high",
"cache_mem_low", "cache_mgr", "cachemgr_passwd", "cache_peer",
"cache_peer_access", "cahce_replacement_policy", "cache_stoplist",
"cache_stoplist_pattern", "cache_store_log", "cache_swap",
"cache_swap_high", "cache_swap_log", "cache_swap_low", "client_db",
"client_lifetime", "client_netmask", "connect_timeout", "coredump_dir",
"dead_peer_timeout", "debug_options", "delay_access", "delay_class",
"delay_initial_bucket_level", "delay_parameters", "delay_pools",
"deny_info", "dns_children", "dns_defnames", "dns_nameservers",
"dns_testnames", "emulate_httpd_log", "err_html_text",
"fake_user_agent", "firewall_ip", "forwarded_for", "forward_snmpd_port",
"fqdncache_size", "ftpget_options", "ftpget_program", "ftp_list_width",
"ftp_passive", "ftp_user", "half_closed_clients", "header_access",
"header_replace", "hierarchy_stoplist", "high_response_time_warning",
"high_page_fault_warning", "hosts_file", "htcp_port", "http_access",
"http_anonymizer", "httpd_accel", "httpd_accel_host",
"httpd_accel_port", "httpd_accel_uses_host_header",
"httpd_accel_with_proxy", "http_port", "http_reply_access",
"icp_access", "icp_hit_stale", "icp_port", "icp_query_timeout",
"ident_lookup", "ident_lookup_access", "ident_timeout",
"incoming_http_average", "incoming_icp_average", "inside_firewall",
"ipcache_high", "ipcache_low", "ipcache_size", "local_domain",
"local_ip", "logfile_rotate", "log_fqdn", "log_icp_queries",
"log_mime_hdrs", "maximum_object_size", "maximum_single_addr_tries",
"mcast_groups", "mcast_icp_query_timeout", "mcast_miss_addr",
"mcast_miss_encode_key", "mcast_miss_port", "memory_pools",
"memory_pools_limit", "memory_replacement_policy", "mime_table",
"min_http_poll_cnt", "min_icp_poll_cnt", "minimum_direct_hops",
"minimum_object_size", "minimum_retry_timeout", "miss_access",
"negative_dns_ttl", "negative_ttl", "neighbor_timeout",
"neighbor_type_domain", "netdb_high", "netdb_low", "netdb_ping_period",
"netdb_ping_rate", "never_direct", "no_cache", "passthrough_proxy",
"pconn_timeout", "pid_filename", "pinger_program", "positive_dns_ttl",
"prefer_direct", "proxy_auth", "proxy_auth_realm", "query_icmp",
"quick_abort", "quick_abort_max", "quick_abort_min",
"quick_abort_pct", "range_offset_limit", "read_timeout",
"redirect_children", "redirect_program",
"redirect_rewrites_host_header", "reference_age",
"refresh_pattern", "reload_into_ims", "request_body_max_size",
"request_size", "request_timeout", "shutdown_lifetime",
"single_parent_bypass", "siteselect_timeout", "snmp_access",
"snmp_incoming_address", "snmp_port", "source_ping", "ssl_proxy",
"store_avg_object_size", "store_objects_per_bucket",
"strip_query_terms", "swap_level1_dirs", "swap_level2_dirs",
"tcp_incoming_address", "tcp_outgoing_address", "tcp_recv_bufsize",
"test_reachability", "udp_hit_obj", "udp_hit_obj_size",
"udp_incoming_address", "udp_outgoing_address", "unique_hostname",
"unlinkd_program", "uri_whitespace", "useragent_log",
"visible_hostname", "wais_relay", "wais_relay_host", "wais_relay_port",
)
opts = (
"proxy-only", "weight", "ttl", "no-query", "default", "round-robin",
"multicast-responder", "on", "off", "all", "deny", "allow", "via",
"parent", "no-digest", "heap", "lru", "realm", "children", "q1", "q2",
"credentialsttl", "none", "disable", "offline_toggle", "diskd",
)
actions = (
"shutdown", "info", "parameter", "server_list", "client_list",
"squid.conf",
)
actions_stats = (
"objects", "vm_objects", "utilization", "ipcache", "fqdncache", "dns",
"redirector", "io", "reply_headers", "filedescriptors", "netdb",
)
actions_log = ("status", "enable", "disable", "clear")
acls = (
"url_regex", "urlpath_regex", "referer_regex", "port", "proto",
"req_mime_type", "rep_mime_type", "method", "browser", "user", "src",
"dst", "time", "dstdomain", "ident", "snmp_community",
)
ip_re = (
r'(?:(?:(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|0x0*[0-9a-f]{1,2}|'
r'0+[1-3]?[0-7]{0,2})(?:\.(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|'
r'0x0*[0-9a-f]{1,2}|0+[1-3]?[0-7]{0,2})){3})|(?!.*::.*::)(?:(?!:)|'
r':(?=:))(?:[0-9a-f]{0,4}(?:(?<=::)|(?<!::):)){6}(?:[0-9a-f]{0,4}'
r'(?:(?<=::)|(?<!::):)[0-9a-f]{0,4}(?:(?<=::)|(?<!:)|(?<=:)(?<!::):)|'
r'(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-4]|2[0-4]\d|1\d\d|'
r'[1-9]?\d)){3}))'
)
tokens = {
'root': [
(r'\s+', Whitespace),
(r'#', Comment, 'comment'),
(words(keywords, prefix=r'\b', suffix=r'\b'), Keyword),
(words(opts, prefix=r'\b', suffix=r'\b'), Name.Constant),
# Actions
(words(actions, prefix=r'\b', suffix=r'\b'), String),
(words(actions_stats, prefix=r'stats/', suffix=r'\b'), String),
(words(actions_log, prefix=r'log/', suffix=r'='), String),
(words(acls, prefix=r'\b', suffix=r'\b'), Keyword),
(ip_re + r'(?:/(?:' + ip_re + r'|\b\d+\b))?', Number.Float),
(r'(?:\b\d+\b(?:-\b\d+|%)?)', Number),
(r'\S+', Text),
],
'comment': [
(r'\s*TAG:.*', String.Escape, '#pop'),
(r'.+', Comment, '#pop'),
default('#pop'),
],
}
class NginxConfLexer(RegexLexer):
"""
Lexer for `Nginx <http://nginx.net/>`_ configuration files.
.. versionadded:: 0.11
"""
name = 'Nginx configuration file'
aliases = ['nginx']
filenames = ['nginx.conf']
mimetypes = ['text/x-nginx-conf']
tokens = {
'root': [
(r'(include)(\s+)([^\s;]+)', bygroups(Keyword, Text, Name)),
(r'[^\s;#]+', Keyword, 'stmt'),
include('base'),
],
'block': [
(r'\}', Punctuation, '#pop:2'),
(r'[^\s;#]+', Keyword.Namespace, 'stmt'),
include('base'),
],
'stmt': [
(r'\{', Punctuation, 'block'),
(r';', Punctuation, '#pop'),
include('base'),
],
'base': [
(r'#.*\n', Comment.Single),
(r'on|off', Name.Constant),
(r'\$[^\s;#()]+', Name.Variable),
(r'([a-z0-9.-]+)(:)([0-9]+)',
bygroups(Name, Punctuation, Number.Integer)),
(r'[a-z-]+/[a-z-+]+', String), # mimetype
# (r'[a-zA-Z._-]+', Keyword),
(r'[0-9]+[km]?\b', Number.Integer),
(r'(~)(\s*)([^\s{]+)', bygroups(Punctuation, Text, String.Regex)),
(r'[:=~]', Punctuation),
(r'[^\s;#{}$]+', String), # catch all
(r'/[^\s;#]*', Name), # pathname
(r'\s+', Text),
(r'[$;]', Text), # leftover characters
],
}
class LighttpdConfLexer(RegexLexer):
"""
Lexer for `Lighttpd <http://lighttpd.net/>`_ configuration files.
.. versionadded:: 0.11
"""
name = 'Lighttpd configuration file'
aliases = ['lighty', 'lighttpd']
filenames = []
mimetypes = ['text/x-lighttpd-conf']
tokens = {
'root': [
(r'#.*\n', Comment.Single),
(r'/\S*', Name), # pathname
(r'[a-zA-Z._-]+', Keyword),
(r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
(r'[0-9]+', Number),
(r'=>|=~|\+=|==|=|\+', Operator),
(r'\$[A-Z]+', Name.Builtin),
(r'[(){}\[\],]', Punctuation),
(r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),
(r'\s+', Text),
],
}
class DockerLexer(RegexLexer):
"""
Lexer for `Docker <http://docker.io>`_ configuration files.
.. versionadded:: 2.0
"""
name = 'Docker'
aliases = ['docker', 'dockerfile']
filenames = ['Dockerfile', '*.docker']
mimetypes = ['text/x-dockerfile-config']
_keywords = (r'(?:FROM|MAINTAINER|CMD|EXPOSE|ENV|ADD|ENTRYPOINT|'
r'VOLUME|WORKDIR)')
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
(r'^(ONBUILD)(\s+)(%s)\b' % (_keywords,),
bygroups(Name.Keyword, Whitespace, Keyword)),
(r'^(%s)\b(.*)' % (_keywords,), bygroups(Keyword, String)),
(r'#.*', Comment),
(r'RUN', Keyword), # Rest of line falls through
(r'(.*\\\n)*.+', using(BashLexer)),
],
}
class TerraformLexer(RegexLexer):
"""
Lexer for `Terraform <https://www.terraform.io/>`_ ``.tf`` files.
.. versionadded:: 2.1
"""
name = 'Terraform'
aliases = ['terraform', 'tf']
filenames = ['*.tf']
mimetypes = ['application/x-tf', 'application/x-terraform']
tokens = {
'root': [
include('string'),
include('punctuation'),
include('curly'),
include('basic'),
include('whitespace'),
(r'[0-9]+', Number),
],
'basic': [
(words(('true', 'false'), prefix=r'\b', suffix=r'\b'), Keyword.Type),
(r'\s*/\*', Comment.Multiline, 'comment'),
(r'\s*#.*\n', Comment.Single),
(r'(.*?)(\s*)(=)', bygroups(Name.Attribute, Text, Operator)),
(words(('variable', 'resource', 'provider', 'provisioner', 'module'),
prefix=r'\b', suffix=r'\b'), Keyword.Reserved, 'function'),
(words(('ingress', 'egress', 'listener', 'default', 'connection'),
prefix=r'\b', suffix=r'\b'), Keyword.Declaration),
(r'\$\{', String.Interpol, 'var_builtin'),
],
'function': [
(r'(\s+)(".*")(\s+)', bygroups(Text, String, Text)),
include('punctuation'),
include('curly'),
],
'var_builtin': [
(r'\$\{', String.Interpol, '#push'),
(words(('concat', 'file', 'join', 'lookup', 'element'),
prefix=r'\b', suffix=r'\b'), Name.Builtin),
include('string'),
include('punctuation'),
(r'\s+', Text),
(r'\}', String.Interpol, '#pop'),
],
'string': [
(r'(".*")', bygroups(String.Double)),
],
'punctuation': [
(r'[\[\](),.]', Punctuation),
],
# Keep this separate from punctuation - we sometimes want to use different
# tokens for { }
'curly': [
(r'\{', Text.Punctuation),
(r'\}', Text.Punctuation),
],
'comment': [
(r'[^*/]', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline)
],
'whitespace': [
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text),
],
}
class TermcapLexer(RegexLexer):
"""
Lexer for termcap database source.
This is very simple and minimal.
.. versionadded:: 2.1
"""
name = 'Termcap'
aliases = ['termcap']
filenames = ['termcap', 'termcap.src']
mimetypes = []
# NOTE:
# * multiline with trailing backslash
# * separator is ':'
# * to embed colon as data, we must use \072
# * space after separator is not allowed (maybe)
tokens = {
'root': [
(r'^#.*$', Comment),
(r'^[^\s#:|]+', Name.Tag, 'names'),
],
'names': [
(r'\n', Text, '#pop'),
(r':', Punctuation, 'defs'),
(r'\|', Punctuation),
(r'[^:|]+', Name.Attribute),
],
'defs': [
(r'\\\n[ \t]*', Text),
(r'\n[ \t]*', Text, '#pop:2'),
(r'(#)([0-9]+)', bygroups(Operator, Number)),
(r'=', Operator, 'data'),
(r':', Punctuation),
(r'[^\s:=#]+', Name.Class),
],
'data': [
(r'\\072', Literal),
(r':', Punctuation, '#pop'),
(r'[^:\\]+', Literal), # for performance
(r'.', Literal),
],
}
class TerminfoLexer(RegexLexer):
"""
Lexer for terminfo database source.
This is very simple and minimal.
.. versionadded:: 2.1
"""
name = 'Terminfo'
aliases = ['terminfo']
filenames = ['terminfo', 'terminfo.src']
mimetypes = []
# NOTE:
# * multiline with leading whitespace
# * separator is ','
# * to embed comma as data, we can use \,
# * space after separator is allowed
tokens = {
'root': [
(r'^#.*$', Comment),
(r'^[^\s#,|]+', Name.Tag, 'names'),
],
'names': [
(r'\n', Text, '#pop'),
(r'(,)([ \t]*)', bygroups(Punctuation, Text), 'defs'),
(r'\|', Punctuation),
(r'[^,|]+', Name.Attribute),
],
'defs': [
(r'\n[ \t]+', Text),
(r'\n', Text, '#pop:2'),
(r'(#)([0-9]+)', bygroups(Operator, Number)),
(r'=', Operator, 'data'),
(r'(,)([ \t]*)', bygroups(Punctuation, Text)),
(r'[^\s,=#]+', Name.Class),
],
'data': [
(r'\\[,\\]', Literal),
(r'(,)([ \t]*)', bygroups(Punctuation, Text), '#pop'),
(r'[^\\,]+', Literal), # for performance
(r'.', Literal),
],
}
class PkgConfigLexer(RegexLexer):
"""
Lexer for `pkg-config
<http://www.freedesktop.org/wiki/Software/pkg-config/>`_
(see also `manual page <http://linux.die.net/man/1/pkg-config>`_).
.. versionadded:: 2.1
"""
name = 'PkgConfig'
aliases = ['pkgconfig']
filenames = ['*.pc']
mimetypes = []
tokens = {
'root': [
(r'#.*$', Comment.Single),
# variable definitions
(r'^(\w+)(=)', bygroups(Name.Attribute, Operator)),
# keyword lines
(r'^([\w.]+)(:)',
bygroups(Name.Tag, Punctuation), 'spvalue'),
# variable references
include('interp'),
# fallback
(r'[^${}#=:\n.]+', Text),
(r'.', Text),
],
'interp': [
# you can escape literal "$" as "$$"
(r'\$\$', Text),
# variable references
(r'\$\{', String.Interpol, 'curly'),
],
'curly': [
(r'\}', String.Interpol, '#pop'),
(r'\w+', Name.Attribute),
],
'spvalue': [
include('interp'),
(r'#.*$', Comment.Single, '#pop'),
(r'\n', Text, '#pop'),
# fallback
(r'[^${}#\n]+', Text),
(r'.', Text),
],
}
class PacmanConfLexer(RegexLexer):
"""
Lexer for `pacman.conf
<https://www.archlinux.org/pacman/pacman.conf.5.html>`_.
Actually, IniLexer works almost fine for this format,
but it yields error tokens, because pacman.conf allows
bare directives without an assignment, like:
UseSyslog
Color
TotalDownload
CheckSpace
VerbosePkgLists
These are flags to switch on.
.. versionadded:: 2.1
"""
name = 'PacmanConf'
aliases = ['pacmanconf']
filenames = ['pacman.conf']
mimetypes = []
tokens = {
'root': [
# comment
(r'#.*$', Comment.Single),
# section header
(r'^\s*\[.*?\]\s*$', Keyword),
# variable definitions
# (Leading space is allowed...)
(r'(\w+)(\s*)(=)',
bygroups(Name.Attribute, Text, Operator)),
# flags to on
(r'^(\s*)(\w+)(\s*)$',
bygroups(Text, Name.Attribute, Text)),
# built-in special values
(words((
'$repo', # repository
'$arch', # architecture
'%o', # outfile
'%u', # url
), suffix=r'\b'),
Name.Variable),
# fallback
(r'.', Text),
],
}
|
bsd-3-clause
|
JCBarahona/edX
|
common/djangoapps/course_modes/migrations/0007_add_description.py
|
114
|
2270
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'CourseMode.description'
db.add_column('course_modes_coursemode', 'description',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
# Changing field 'CourseMode.course_id'
db.alter_column('course_modes_coursemode', 'course_id', self.gf('xmodule_django.models.CourseKeyField')(max_length=255))
def backwards(self, orm):
# Deleting field 'CourseMode.description'
db.delete_column('course_modes_coursemode', 'description')
# Changing field 'CourseMode.course_id'
db.alter_column('course_modes_coursemode', 'course_id', self.gf('django.db.models.fields.CharField')(max_length=255))
models = {
'course_modes.coursemode': {
'Meta': {'unique_together': "(('course_id', 'mode_slug', 'currency'),)", 'object_name': 'CourseMode'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'expiration_date': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'expiration_datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'min_price': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'mode_display_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'mode_slug': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'suggested_prices': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['course_modes']
|
agpl-3.0
|
c0hen/django-venv
|
lib/python3.4/site-packages/django/contrib/gis/db/backends/spatialite/models.py
|
510
|
2946
|
"""
The GeometryColumns and SpatialRefSys models for the SpatiaLite backend.
"""
from django.contrib.gis.db.backends.base.models import SpatialRefSysMixin
from django.contrib.gis.db.backends.spatialite.base import DatabaseWrapper
from django.db import connection, models
from django.db.backends.signals import connection_created
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class SpatialiteGeometryColumns(models.Model):
"""
The 'geometry_columns' table from SpatiaLite.
"""
f_table_name = models.CharField(max_length=256)
f_geometry_column = models.CharField(max_length=256)
coord_dimension = models.IntegerField()
srid = models.IntegerField(primary_key=True)
spatial_index_enabled = models.IntegerField()
class Meta:
app_label = 'gis'
db_table = 'geometry_columns'
managed = False
@classmethod
def table_name_col(cls):
"""
Returns the name of the metadata column used to store the feature table
name.
"""
return 'f_table_name'
@classmethod
def geom_col_name(cls):
"""
Returns the name of the metadata column used to store the feature
geometry column.
"""
return 'f_geometry_column'
def __str__(self):
return "%s.%s - %dD %s field (SRID: %d)" % \
(self.f_table_name, self.f_geometry_column,
self.coord_dimension, self.type, self.srid)
class SpatialiteSpatialRefSys(models.Model, SpatialRefSysMixin):
"""
The 'spatial_ref_sys' table from SpatiaLite.
"""
srid = models.IntegerField(primary_key=True)
auth_name = models.CharField(max_length=256)
auth_srid = models.IntegerField()
ref_sys_name = models.CharField(max_length=256)
proj4text = models.CharField(max_length=2048)
@property
def wkt(self):
if hasattr(self, 'srtext'):
return self.srtext
from django.contrib.gis.gdal import SpatialReference
return SpatialReference(self.proj4text).wkt
class Meta:
app_label = 'gis'
db_table = 'spatial_ref_sys'
managed = False
def add_spatial_version_related_fields(sender, **kwargs):
"""
Adds fields after establishing a database connection to prevent database
operations at compile time.
"""
if connection_created.disconnect(add_spatial_version_related_fields, sender=DatabaseWrapper):
spatial_version = connection.ops.spatial_version[0]
if spatial_version >= 4:
SpatialiteSpatialRefSys.add_to_class('srtext', models.CharField(max_length=2048))
SpatialiteGeometryColumns.add_to_class('type', models.IntegerField(db_column='geometry_type'))
else:
SpatialiteGeometryColumns.add_to_class('type', models.CharField(max_length=30))
connection_created.connect(add_spatial_version_related_fields, sender=DatabaseWrapper)
|
gpl-3.0
|
smily77/Arduino
|
arduino-core/src/processing/app/i18n/python/requests/packages/urllib3/connectionpool.py
|
184
|
20547
|
# urllib3/connectionpool.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import logging
import socket
import errno
from socket import error as SocketError, timeout as SocketTimeout
from .util import resolve_cert_reqs, resolve_ssl_version
try: # Python 3
from http.client import HTTPConnection, HTTPException
from http.client import HTTP_PORT, HTTPS_PORT
except ImportError:
from httplib import HTTPConnection, HTTPException
from httplib import HTTP_PORT, HTTPS_PORT
try: # Python 3
from queue import LifoQueue, Empty, Full
except ImportError:
from Queue import LifoQueue, Empty, Full
try: # Compiled with SSL?
HTTPSConnection = object
BaseSSLError = None
ssl = None
try: # Python 3
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
import ssl
BaseSSLError = ssl.SSLError
except (ImportError, AttributeError): # Platform-specific: No SSL.
pass
from .request import RequestMethods
from .response import HTTPResponse
from .util import get_host, is_connection_dropped, ssl_wrap_socket
from .exceptions import (
ClosedPoolError,
EmptyPoolError,
HostChangedError,
MaxRetryError,
SSLError,
TimeoutError,
)
from .packages.ssl_match_hostname import match_hostname, CertificateError
from .packages import six
xrange = six.moves.xrange
log = logging.getLogger(__name__)
_Default = object()
port_by_scheme = {
'http': HTTP_PORT,
'https': HTTPS_PORT,
}
## Connection objects (extension of httplib)
class VerifiedHTTPSConnection(HTTPSConnection):
"""
Based on httplib.HTTPSConnection but wraps the socket with
SSL certification.
"""
cert_reqs = None
ca_certs = None
ssl_version = None
def set_cert(self, key_file=None, cert_file=None,
cert_reqs=None, ca_certs=None):
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.ca_certs = ca_certs
def connect(self):
# Add certificate verification
sock = socket.create_connection((self.host, self.port), self.timeout)
resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs)
resolved_ssl_version = resolve_ssl_version(self.ssl_version)
# Wrap socket using verification with the root certs in
# trusted_root_certs
self.sock = ssl_wrap_socket(sock, self.key_file, self.cert_file,
cert_reqs=resolved_cert_reqs,
ca_certs=self.ca_certs,
server_hostname=self.host,
ssl_version=resolved_ssl_version)
if resolved_cert_reqs != ssl.CERT_NONE:
match_hostname(self.sock.getpeercert(), self.host)
## Pool objects
class ConnectionPool(object):
"""
Base class for all connection pools, such as
:class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
"""
scheme = None
QueueCls = LifoQueue
def __init__(self, host, port=None):
self.host = host
self.port = port
def __str__(self):
return '%s(host=%r, port=%r)' % (type(self).__name__,
self.host, self.port)
class HTTPConnectionPool(ConnectionPool, RequestMethods):
"""
Thread-safe connection pool for one host.
:param host:
Host used for this HTTP Connection (e.g. "localhost"), passed into
:class:`httplib.HTTPConnection`.
:param port:
Port used for this HTTP Connection (None is equivalent to 80), passed
into :class:`httplib.HTTPConnection`.
:param strict:
Causes BadStatusLine to be raised if the status line can't be parsed
as a valid HTTP/1.0 or 1.1 status line, passed into
:class:`httplib.HTTPConnection`.
:param timeout:
Socket timeout for each individual connection, can be a float. None
disables timeout.
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
in multithreaded situations. If ``block`` is set to false, more
connections will be created but they will not be saved once they've
been used.
:param block:
If set to True, no more than ``maxsize`` connections will be used at
a time. When no free connections are available, the call will block
until a connection has been released. This is a useful side effect for
particular multithreaded situations where one does not want to use more
than maxsize connections per host to prevent flooding.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
"""
scheme = 'http'
def __init__(self, host, port=None, strict=False, timeout=None, maxsize=1,
block=False, headers=None):
ConnectionPool.__init__(self, host, port)
RequestMethods.__init__(self, headers)
self.strict = strict
self.timeout = timeout
self.pool = self.QueueCls(maxsize)
self.block = block
# Fill the queue up so that doing get() on it will block properly
for _ in xrange(maxsize):
self.pool.put(None)
# These are mostly for testing and debugging purposes.
self.num_connections = 0
self.num_requests = 0
def _new_conn(self):
"""
Return a fresh :class:`httplib.HTTPConnection`.
"""
self.num_connections += 1
log.info("Starting new HTTP connection (%d): %s" %
(self.num_connections, self.host))
return HTTPConnection(host=self.host,
port=self.port,
strict=self.strict)
def _get_conn(self, timeout=None):
"""
Get a connection. Will return a pooled connection if one is available.
If no connections are available and :prop:`.block` is ``False``, then a
fresh connection is returned.
:param timeout:
Seconds to wait before giving up and raising
:class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
:prop:`.block` is ``True``.
"""
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError: # self.pool is None
raise ClosedPoolError(self, "Pool is closed.")
except Empty:
if self.block:
raise EmptyPoolError(self,
"Pool reached maximum size and no more "
"connections are allowed.")
pass # Oh well, we'll create a new connection then
# If this is a persistent connection, check if it got disconnected
if conn and is_connection_dropped(conn):
log.info("Resetting dropped connection: %s" % self.host)
conn.close()
return conn or self._new_conn()
def _put_conn(self, conn):
"""
Put a connection back into the pool.
:param conn:
Connection object for the current host and port as returned by
:meth:`._new_conn` or :meth:`._get_conn`.
If the pool is already full, the connection is closed and discarded
because we exceeded maxsize. If connections are discarded frequently,
then maxsize should be increased.
If the pool is closed, then the connection will be closed and discarded.
"""
try:
self.pool.put(conn, block=False)
return # Everything is dandy, done.
except AttributeError:
# self.pool is None.
pass
except Full:
# This should never happen if self.block == True
log.warning("HttpConnectionPool is full, discarding connection: %s"
% self.host)
# Connection never got put back into the pool, close it.
conn.close()
def _make_request(self, conn, method, url, timeout=_Default,
**httplib_request_kw):
"""
Perform a request on a given httplib connection object taken from our
pool.
"""
self.num_requests += 1
if timeout is _Default:
timeout = self.timeout
conn.timeout = timeout # This only does anything in Py26+
conn.request(method, url, **httplib_request_kw)
# Set timeout
sock = getattr(conn, 'sock', False) # AppEngine doesn't have sock attr.
if sock:
sock.settimeout(timeout)
try: # Python 2.7+, use buffering of HTTP responses
httplib_response = conn.getresponse(buffering=True)
except TypeError: # Python 2.6 and older
httplib_response = conn.getresponse()
# AppEngine doesn't have a version attr.
http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
log.debug("\"%s %s %s\" %s %s" % (method, url, http_version,
httplib_response.status,
httplib_response.length))
return httplib_response
def close(self):
"""
Close all pooled connections and disable the pool.
"""
# Disable access to the pool
old_pool, self.pool = self.pool, None
try:
while True:
conn = old_pool.get(block=False)
if conn:
conn.close()
except Empty:
pass # Done.
def is_same_host(self, url):
"""
Check if the given ``url`` is a member of the same host as this
connection pool.
"""
if url.startswith('/'):
return True
# TODO: Add optional support for socket.gethostbyname checking.
scheme, host, port = get_host(url)
if self.port and not port:
# Use explicit default port for comparison when none is given.
port = port_by_scheme.get(scheme)
return (scheme, host, port) == (self.scheme, self.host, self.port)
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True, timeout=_Default,
pool_timeout=None, release_conn=None, **response_kw):
"""
Get a connection from the pool and perform an HTTP request. This is the
lowest level call for making a request, so you'll need to specify all
the raw details.
.. note::
More commonly, it's appropriate to use a convenience method provided
by :class:`.RequestMethods`, such as :meth:`request`.
.. note::
`release_conn` will only behave as expected if
`preload_content=False` because we want to make
`preload_content=False` the default behaviour someday soon without
breaking backwards compatibility.
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param body:
Data to send in the request body (useful for creating
POST requests, see HTTPConnectionPool.post_url for
more convenience).
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Number of retries to allow before raising a MaxRetryError exception.
:param redirect:
If True, automatically handle redirects (status codes 301, 302,
303, 307). Each redirect counts as a retry.
:param assert_same_host:
If ``True``, will make sure that the host of the pool requests is
consistent else will raise HostChangedError. When False, you can
use the pool on an HTTP proxy and request foreign hosts.
:param timeout:
If specified, overrides the default timeout for this one request.
:param pool_timeout:
If set and the pool is set to block=True, then this method will
block for ``pool_timeout`` seconds and raise EmptyPoolError if no
connection is available within the time period.
:param release_conn:
If False, then the urlopen call will not release the connection
back into the pool once a response is received (but will release if
you read the entire contents of the response such as when
`preload_content=True`). This is useful if you're not preloading
the response's content immediately. You will need to call
``r.release_conn()`` on the response ``r`` to return the connection
back into the pool. If None, it takes the value of
``response_kw.get('preload_content', True)``.
:param \**response_kw:
Additional parameters are passed to
:meth:`urllib3.response.HTTPResponse.from_httplib`
"""
if headers is None:
headers = self.headers
if retries < 0:
raise MaxRetryError(self, url)
if timeout is _Default:
timeout = self.timeout
if release_conn is None:
release_conn = response_kw.get('preload_content', True)
# Check host
if assert_same_host and not self.is_same_host(url):
host = "%s://%s" % (self.scheme, self.host)
if self.port:
host = "%s:%d" % (host, self.port)
raise HostChangedError(self, url, retries - 1)
conn = None
try:
# Request a connection from the queue
conn = self._get_conn(timeout=pool_timeout)
# Make the request on the httplib connection object
httplib_response = self._make_request(conn, method, url,
timeout=timeout,
body=body, headers=headers)
# If we're going to release the connection in ``finally:``, then
# the request doesn't need to know about the connection. Otherwise
# it will also try to release it and we'll have a double-release
# mess.
response_conn = not release_conn and conn
# Import httplib's response into our own wrapper object
response = HTTPResponse.from_httplib(httplib_response,
pool=self,
connection=response_conn,
**response_kw)
# else:
# The connection will be put back into the pool when
# ``response.release_conn()`` is called (implicitly by
# ``response.read()``)
except Empty as e:
# Timed out by queue
raise TimeoutError(self, "Request timed out. (pool_timeout=%s)" %
pool_timeout)
except SocketTimeout as e:
# Timed out by socket
raise TimeoutError(self, "Request timed out. (timeout=%s)" %
timeout)
except BaseSSLError as e:
# SSL certificate error
raise SSLError(e)
except CertificateError as e:
# Name mismatch
raise SSLError(e)
except (HTTPException, SocketError) as e:
# Connection broken, discard. It will be replaced next _get_conn().
conn = None
# This is necessary so we can access e below
err = e
if retries == 0:
raise MaxRetryError(self, url, e)
finally:
if release_conn:
# Put the connection back to be reused. If the connection is
# expired then it will be None, which will get replaced with a
# fresh connection during _get_conn.
self._put_conn(conn)
if not conn:
# Try again
log.warn("Retrying (%d attempts remain) after connection "
"broken by '%r': %s" % (retries, err, url))
return self.urlopen(method, url, body, headers, retries - 1,
redirect, assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
# Handle redirect?
redirect_location = redirect and response.get_redirect_location()
if redirect_location:
if response.status == 303:
method = 'GET'
log.info("Redirecting %s -> %s" % (url, redirect_location))
return self.urlopen(method, redirect_location, body, headers,
retries - 1, redirect, assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
return response
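# Editor's sketch (not part of the original module): the pool's low-level
# urlopen() API in use. Assumes network access; host and path are
# illustrative.
if __name__ == "__main__":
    pool = HTTPConnectionPool("example.com", maxsize=2)
    r = pool.urlopen("GET", "/")
    print(r.status)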
class HTTPSConnectionPool(HTTPConnectionPool):
"""
Same as :class:`.HTTPConnectionPool`, but HTTPS.
When Python is compiled with the :mod:`ssl` module, then
:class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
instead of :class:`httplib.HTTPSConnection`.
The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``, and ``ssl_version``
are only used if :mod:`ssl` is available and are fed into
:meth:`urllib3.util.ssl_wrap_socket` to upgrade the connection socket into an SSL socket.
"""
scheme = 'https'
def __init__(self, host, port=None,
strict=False, timeout=None, maxsize=1,
block=False, headers=None,
key_file=None, cert_file=None,
cert_reqs=None, ca_certs=None, ssl_version=None):
HTTPConnectionPool.__init__(self, host, port,
strict, timeout, maxsize,
block, headers)
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.ca_certs = ca_certs
self.ssl_version = ssl_version
def _new_conn(self):
"""
Return a fresh :class:`httplib.HTTPSConnection`.
"""
self.num_connections += 1
log.info("Starting new HTTPS connection (%d): %s"
% (self.num_connections, self.host))
if not ssl: # Platform-specific: Python compiled without +ssl
if not HTTPSConnection or HTTPSConnection is object:
raise SSLError("Can't connect to HTTPS URL because the SSL "
"module is not available.")
return HTTPSConnection(host=self.host,
port=self.port,
strict=self.strict)
connection = VerifiedHTTPSConnection(host=self.host,
port=self.port,
strict=self.strict)
connection.set_cert(key_file=self.key_file, cert_file=self.cert_file,
cert_reqs=self.cert_reqs, ca_certs=self.ca_certs)
connection.ssl_version = self.ssl_version
return connection
def connection_from_url(url, **kw):
"""
Given a url, return an :class:`.ConnectionPool` instance of its host.
This is a shortcut for not having to parse out the scheme, host, and port
of the url before creating an :class:`.ConnectionPool` instance.
:param url:
Absolute URL string that must include the scheme. Port is optional.
:param \**kw:
Passes additional parameters to the constructor of the appropriate
:class:`.ConnectionPool`. Useful for specifying things like
timeout, maxsize, headers, etc.
Example: ::
>>> conn = connection_from_url('http://google.com/')
>>> r = conn.request('GET', '/')
"""
scheme, host, port = get_host(url)
if scheme == 'https':
return HTTPSConnectionPool(host, port=port, **kw)
else:
return HTTPConnectionPool(host, port=port, **kw)
|
lgpl-2.1
|
timothycrosley/isort
|
isort/stdlibs/py39.py
|
3
|
3295
|
"""
File contains the standard library of Python 3.9.
DO NOT EDIT. If the standard library changes, a new list should be created
using the mkstdlibs.py script.
"""
stdlib = {
"_thread",
"abc",
"aifc",
"argparse",
"array",
"ast",
"asynchat",
"asyncio",
"asyncore",
"atexit",
"audioop",
"base64",
"bdb",
"binascii",
"binhex",
"bisect",
"builtins",
"bz2",
"cProfile",
"calendar",
"cgi",
"cgitb",
"chunk",
"cmath",
"cmd",
"code",
"codecs",
"codeop",
"collections",
"colorsys",
"compileall",
"concurrent",
"configparser",
"contextlib",
"contextvars",
"copy",
"copyreg",
"crypt",
"csv",
"ctypes",
"curses",
"dataclasses",
"datetime",
"dbm",
"decimal",
"difflib",
"dis",
"distutils",
"doctest",
"email",
"encodings",
"ensurepip",
"enum",
"errno",
"faulthandler",
"fcntl",
"filecmp",
"fileinput",
"fnmatch",
"formatter",
"fractions",
"ftplib",
"functools",
"gc",
"getopt",
"getpass",
"gettext",
"glob",
"graphlib",
"grp",
"gzip",
"hashlib",
"heapq",
"hmac",
"html",
"http",
"imaplib",
"imghdr",
"imp",
"importlib",
"inspect",
"io",
"ipaddress",
"itertools",
"json",
"keyword",
"lib2to3",
"linecache",
"locale",
"logging",
"lzma",
"mailbox",
"mailcap",
"marshal",
"math",
"mimetypes",
"mmap",
"modulefinder",
"msilib",
"msvcrt",
"multiprocessing",
"netrc",
"nis",
"nntplib",
"ntpath",
"numbers",
"operator",
"optparse",
"os",
"ossaudiodev",
"parser",
"pathlib",
"pdb",
"pickle",
"pickletools",
"pipes",
"pkgutil",
"platform",
"plistlib",
"poplib",
"posix",
"posixpath",
"pprint",
"profile",
"pstats",
"pty",
"pwd",
"py_compile",
"pyclbr",
"pydoc",
"queue",
"quopri",
"random",
"re",
"readline",
"reprlib",
"resource",
"rlcompleter",
"runpy",
"sched",
"secrets",
"select",
"selectors",
"shelve",
"shlex",
"shutil",
"signal",
"site",
"smtpd",
"smtplib",
"sndhdr",
"socket",
"socketserver",
"spwd",
"sqlite3",
"sre",
"sre_compile",
"sre_constants",
"sre_parse",
"ssl",
"stat",
"statistics",
"string",
"stringprep",
"struct",
"subprocess",
"sunau",
"symbol",
"symtable",
"sys",
"sysconfig",
"syslog",
"tabnanny",
"tarfile",
"telnetlib",
"tempfile",
"termios",
"test",
"textwrap",
"threading",
"time",
"timeit",
"tkinter",
"token",
"tokenize",
"trace",
"traceback",
"tracemalloc",
"tty",
"turtle",
"turtledemo",
"types",
"typing",
"unicodedata",
"unittest",
"urllib",
"uu",
"uuid",
"venv",
"warnings",
"wave",
"weakref",
"webbrowser",
"winreg",
"winsound",
"wsgiref",
"xdrlib",
"xml",
"xmlrpc",
"zipapp",
"zipfile",
"zipimport",
"zlib",
"zoneinfo",
}
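if __name__ == "__main__":
    # Minimal hedged example (not part of the generated file): membership in
    # this set is how an import sorter might classify a top-level module name.
    for name in ("asyncio", "requests"):
        print(name, "->", "stdlib" if name in stdlib else "third-party")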
|
mit
|
olemis/brython
|
www/src/Lib/encodings/cp861.py
|
37
|
35331
|
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP861.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp861',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00d0, # LATIN CAPITAL LETTER ETH
0x008c: 0x00f0, # LATIN SMALL LETTER ETH
0x008d: 0x00de, # LATIN CAPITAL LETTER THORN
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00fe, # LATIN SMALL LETTER THORN
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
0x0098: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x20a7, # PESETA SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00a5: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00a6: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00a7: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
'\xd0' # 0x008b -> LATIN CAPITAL LETTER ETH
'\xf0' # 0x008c -> LATIN SMALL LETTER ETH
'\xde' # 0x008d -> LATIN CAPITAL LETTER THORN
'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xfe' # 0x0095 -> LATIN SMALL LETTER THORN
'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xdd' # 0x0097 -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xfd' # 0x0098 -> LATIN SMALL LETTER Y WITH ACUTE
'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
'\xa3' # 0x009c -> POUND SIGN
'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
'\u20a7' # 0x009e -> PESETA SIGN
'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
'\xc1' # 0x00a4 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xcd' # 0x00a5 -> LATIN CAPITAL LETTER I WITH ACUTE
'\xd3' # 0x00a6 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xda' # 0x00a7 -> LATIN CAPITAL LETTER U WITH ACUTE
'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
'\u2310' # 0x00a9 -> REVERSED NOT SIGN
'\xac' # 0x00aa -> NOT SIGN
'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u258c' # 0x00dd -> LEFT HALF BLOCK
'\u2590' # 0x00de -> RIGHT HALF BLOCK
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
'\xb5' # 0x00e6 -> MICRO SIGN
'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
'\u221e' # 0x00ec -> INFINITY
'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
'\u2229' # 0x00ef -> INTERSECTION
'\u2261' # 0x00f0 -> IDENTICAL TO
'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
'\xf7' # 0x00f6 -> DIVISION SIGN
'\u2248' # 0x00f7 -> ALMOST EQUAL TO
'\xb0' # 0x00f8 -> DEGREE SIGN
'\u2219' # 0x00f9 -> BULLET OPERATOR
'\xb7' # 0x00fa -> MIDDLE DOT
'\u221a' # 0x00fb -> SQUARE ROOT
'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
'\xb2' # 0x00fd -> SUPERSCRIPT TWO
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a3: 0x009c, # POUND SIGN
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b5: 0x00e6, # MICRO SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00c1: 0x00a4, # LATIN CAPITAL LETTER A WITH ACUTE
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00cd: 0x00a5, # LATIN CAPITAL LETTER I WITH ACUTE
0x00d0: 0x008b, # LATIN CAPITAL LETTER ETH
0x00d3: 0x00a6, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
0x00da: 0x00a7, # LATIN CAPITAL LETTER U WITH ACUTE
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00dd: 0x0097, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00de: 0x008d, # LATIN CAPITAL LETTER THORN
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00f0: 0x008c, # LATIN SMALL LETTER ETH
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00fd: 0x0098, # LATIN SMALL LETTER Y WITH ACUTE
0x00fe: 0x0095, # LATIN SMALL LETTER THORN
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
0x03c0: 0x00e3, # GREEK SMALL LETTER PI
0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x20a7: 0x009e, # PESETA SIGN
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x221e: 0x00ec, # INFINITY
0x2229: 0x00ef, # INTERSECTION
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2261: 0x00f0, # IDENTICAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2310: 0x00a9, # REVERSED NOT SIGN
0x2320: 0x00f4, # TOP HALF INTEGRAL
0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
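if __name__ == '__main__':
    # Hedged example (not part of the generated codec file): a manual round
    # trip through the table-driven maps defined above.
    codec = Codec()
    encoded, _ = codec.encode('\u00fe')   # LATIN SMALL LETTER THORN
    assert encoded == b'\x95'
    decoded, _ = codec.decode(encoded)
    assert decoded == '\u00fe'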
|
bsd-3-clause
|
entwanne/Rodolphe
|
rodolphe/main/views/tag.py
|
2
|
1805
|
from django.shortcuts import render_to_response
from django.db.models import Q
from django.template import RequestContext
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.utils.translation import ugettext as _
from main.models import Post, Tag
from main.forms import PostForm
from utils.search import get_search
from utils.tags import TagsSet
import re
from collections import defaultdict
from urllib.parse import urlencode
def index(request):
indexed_tags = defaultdict(list)
for tag in Tag.objects.all():
first_letter = tag.name[0].upper()
indexed_tags[first_letter].append(tag.name)
sorted_tags = []
for letter, tags in sorted(indexed_tags.items()):
sorted_tags.append((letter, sorted(tags)))
context = RequestContext(request, {
'indexed_tags': sorted_tags
})
return render_to_response('tags.html', context)
def search(request, pattern):
tags = TagsSet.from_string(pattern)
q, search = get_search(request)
regex = r'(\s|\A)\.{}(\W|\Z)'
for tag in tags:
q &= Q(content__iregex=regex.format(re.escape(tag)))
for tag in tags.iter_exclude():
q &= ~Q(content__iregex=regex.format(re.escape(tag)))
paginator = Paginator(Post.objects.filter(q, active=True)
.order_by('-created_at'), 10)
page_id = request.GET.get('page')
try:
posts = paginator.page(page_id)
except PageNotAnInteger:
posts = paginator.page(1)
except EmptyPage:
posts = paginator.page(paginator.num_pages)
context = RequestContext(request, {
'page': posts,
'form': PostForm(),
'title': '{} - {}'.format(_('tag'), pattern),
'search': search
})
return render_to_response('index.html', context)
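if __name__ == '__main__':
    # Hedged example (illustrative only): how the tag-matching regex used in
    # search() behaves; 'django' is an arbitrary placeholder tag.
    pattern = r'(\s|\A)\.{}(\W|\Z)'.format(re.escape('django'))
    assert re.search(pattern, 'a post about .django here', re.IGNORECASE)
    assert not re.search(pattern, 'no match in .djangoish text', re.IGNORECASE)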
|
bsd-2-clause
|
KyoungRan/Django_React_ex
|
Django_React_Workshop-mbrochh/django/myvenv/lib/python3.4/site-packages/pip/_vendor/progress/helpers.py
|
521
|
2854
|
# Copyright (c) 2012 Giorgos Verigakis <[email protected]>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import print_function
HIDE_CURSOR = '\x1b[?25l'
SHOW_CURSOR = '\x1b[?25h'
class WriteMixin(object):
hide_cursor = False
def __init__(self, message=None, **kwargs):
super(WriteMixin, self).__init__(**kwargs)
self._width = 0
if message:
self.message = message
if self.file.isatty():
if self.hide_cursor:
print(HIDE_CURSOR, end='', file=self.file)
print(self.message, end='', file=self.file)
self.file.flush()
def write(self, s):
if self.file.isatty():
b = '\b' * self._width
c = s.ljust(self._width)
print(b + c, end='', file=self.file)
self._width = max(self._width, len(s))
self.file.flush()
def finish(self):
if self.file.isatty() and self.hide_cursor:
print(SHOW_CURSOR, end='', file=self.file)
class WritelnMixin(object):
hide_cursor = False
def __init__(self, message=None, **kwargs):
super(WritelnMixin, self).__init__(**kwargs)
if message:
self.message = message
if self.file.isatty() and self.hide_cursor:
print(HIDE_CURSOR, end='', file=self.file)
def clearln(self):
if self.file.isatty():
print('\r\x1b[K', end='', file=self.file)
def writeln(self, line):
if self.file.isatty():
self.clearln()
print(line, end='', file=self.file)
self.file.flush()
def finish(self):
if self.file.isatty():
print(file=self.file)
if self.hide_cursor:
print(SHOW_CURSOR, end='', file=self.file)
from signal import signal, SIGINT
from sys import exit
class SigIntMixin(object):
"""Registers a signal handler that calls finish on SIGINT"""
def __init__(self, *args, **kwargs):
super(SigIntMixin, self).__init__(*args, **kwargs)
signal(SIGINT, self._sigint_handler)
def _sigint_handler(self, signum, frame):
self.finish()
exit(0)
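if __name__ == '__main__':
    # Hedged example (not part of the original module): composing WritelnMixin
    # with a tiny host class that supplies the 'file' attribute it expects.
    import sys

    class DemoLine(WritelnMixin):
        file = sys.stderr

    demo = DemoLine(message='working')
    for step in range(3):
        demo.writeln('working... step %d' % step)
    demo.finish()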
|
mit
|
BigRefT/bionic_cms
|
vendor/plugins/bionic-fckeditor/public/javascripts/fckeditor/editor/filemanager/connectors/py/zope.py
|
89
|
5685
|
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2009 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python and Zope.
This code has not been tested at all.
It was simply ported from the pre-2.5 release, so for further reference see
\editor\filemanager\browser\default\connectors\py\connector.py in previous
releases.
"""
from fckutil import *
from connector import *
import config as Config
class FCKeditorConnectorZope(FCKeditorConnector):
"""
Zope version of FCKeditorConnector
"""
# Allow access (Zope)
__allow_access_to_unprotected_subobjects__ = 1
def __init__(self, context=None):
"""
Constructor
"""
FCKeditorConnector.__init__(self, environ=None) # call superclass constructor
# Instance Attributes
self.context = context
self.request = FCKeditorRequest(context)
def getZopeRootContext(self):
if self.zopeRootContext is None:
self.zopeRootContext = self.context.getPhysicalRoot()
return self.zopeRootContext
def getZopeUploadContext(self):
if self.zopeUploadContext is None:
folderNames = self.userFilesFolder.split("/")
c = self.getZopeRootContext()
for folderName in folderNames:
if (folderName != ""):
c = c[folderName]
self.zopeUploadContext = c
return self.zopeUploadContext
def setHeader(self, key, value):
self.context.REQUEST.RESPONSE.setHeader(key, value)
def getFolders(self, resourceType, currentFolder):
# Open the folders node
s = ""
s += """<Folders>"""
zopeFolder = self.findZopeFolder(resourceType, currentFolder)
for (name, o) in zopeFolder.objectItems(["Folder"]):
s += """<Folder name="%s" />""" % (
convertToXmlAttribute(name)
)
# Close the folders node
s += """</Folders>"""
return s
def getZopeFoldersAndFiles(self, resourceType, currentFolder):
folders = self.getZopeFolders(resourceType, currentFolder)
files = self.getZopeFiles(resourceType, currentFolder)
s = folders + files
return s
def getZopeFiles(self, resourceType, currentFolder):
# Open the files node
s = ""
s += """<Files>"""
zopeFolder = self.findZopeFolder(resourceType, currentFolder)
for (name, o) in zopeFolder.objectItems(["File","Image"]):
s += """<File name="%s" size="%s" />""" % (
convertToXmlAttribute(name),
((o.get_size() / 1024) + 1)
)
# Close the files node
s += """</Files>"""
return s
def findZopeFolder(self, resourceType, folderName):
# returns the context of the resource / folder
zopeFolder = self.getZopeUploadContext()
folderName = self.removeFromStart(folderName, "/")
folderName = self.removeFromEnd(folderName, "/")
if (resourceType != ""):
try:
zopeFolder = zopeFolder[resourceType]
except:
zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=resourceType, title=resourceType)
zopeFolder = zopeFolder[resourceType]
if (folderName != ""):
folderNames = folderName.split("/")
for folderName in folderNames:
zopeFolder = zopeFolder[folderName]
return zopeFolder
def createFolder(self, resourceType, currentFolder):
# Find out where we are
zopeFolder = self.findZopeFolder(resourceType, currentFolder)
errorNo = 0
errorMsg = ""
if self.request.has_key("NewFolderName"):
newFolder = self.request.get("NewFolderName", None)
zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=newFolder, title=newFolder)
else:
errorNo = 102
return self.sendErrorNode ( errorNo, errorMsg )
def uploadFile(self, resourceType, currentFolder, count=None):
zopeFolder = self.findZopeFolder(resourceType, currentFolder)
file = self.request.get("NewFile", None)
fileName = self.getFileName(file.filename)
fileNameOnly = self.removeExtension(fileName)
fileExtension = self.getExtension(fileName).lower()
if (count):
nid = "%s.%s.%s" % (fileNameOnly, count, fileExtension)
else:
nid = fileName
title = nid
try:
zopeFolder.manage_addProduct['OFSP'].manage_addFile(
id=nid,
title=title,
file=file.read()
)
except:
if (count):
count += 1
else:
count = 1
return self.uploadFile(resourceType, currentFolder, count) # retry with a numbered name
return self.sendUploadResults( 0 )
class FCKeditorRequest(object):
"A wrapper around the request object"
def __init__(self, context=None):
r = context.REQUEST
self.request = r
def has_key(self, key):
return self.request.has_key(key)
def get(self, key, default=None):
return self.request.get(key, default)
"""
Running from zope, you will need to modify this connector.
If you have uploaded the FCKeditor into Zope (like me), you need to
move this connector out of Zope, and replace the "connector" with an
alias as below. The key to it is to pass the Zope context in, as
we then have a link to the Zope context.
## Script (Python) "connector.py"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=*args, **kws
##title=ALIAS
##
import Products.zope as connector
return connector.FCKeditorConnectorZope(context=context).doResponse()
"""
|
mit
|
Simran-B/arangodb
|
3rdParty/V8-4.3.61/third_party/python_26/Lib/test/test_textwrap.py
|
55
|
23220
|
#
# Test suite for the textwrap module.
#
# Original tests written by Greg Ward <[email protected]>.
# Converted to PyUnit by Peter Hansen <[email protected]>.
# Currently maintained by Greg Ward.
#
# $Id: test_textwrap.py 67896 2008-12-21 17:01:26Z benjamin.peterson $
#
import unittest
from test import test_support
from textwrap import TextWrapper, wrap, fill, dedent
class BaseTestCase(unittest.TestCase):
'''Parent class with utility methods for textwrap tests.'''
def show(self, textin):
if isinstance(textin, list):
result = []
for i in range(len(textin)):
result.append(" %d: %r" % (i, textin[i]))
result = '\n'.join(result)
elif isinstance(textin, basestring):
result = " %s\n" % repr(textin)
return result
def check(self, result, expect):
self.assertEquals(result, expect,
'expected:\n%s\nbut got:\n%s' % (
self.show(expect), self.show(result)))
def check_wrap(self, text, width, expect, **kwargs):
result = wrap(text, width, **kwargs)
self.check(result, expect)
def check_split(self, text, expect):
result = self.wrapper._split(text)
self.assertEquals(result, expect,
"\nexpected %r\n"
"but got %r" % (expect, result))
class WrapTestCase(BaseTestCase):
def setUp(self):
self.wrapper = TextWrapper(width=45)
def test_simple(self):
# Simple case: just words, spaces, and a bit of punctuation
text = "Hello there, how are you this fine day? I'm glad to hear it!"
self.check_wrap(text, 12,
["Hello there,",
"how are you",
"this fine",
"day? I'm",
"glad to hear",
"it!"])
self.check_wrap(text, 42,
["Hello there, how are you this fine day?",
"I'm glad to hear it!"])
self.check_wrap(text, 80, [text])
def test_whitespace(self):
# Whitespace munging and end-of-sentence detection
text = """\
This is a paragraph that already has
line breaks. But some of its lines are much longer than the others,
so it needs to be wrapped.
Some lines are \ttabbed too.
What a mess!
"""
expect = ["This is a paragraph that already has line",
"breaks. But some of its lines are much",
"longer than the others, so it needs to be",
"wrapped. Some lines are tabbed too. What a",
"mess!"]
wrapper = TextWrapper(45, fix_sentence_endings=True)
result = wrapper.wrap(text)
self.check(result, expect)
result = wrapper.fill(text)
self.check(result, '\n'.join(expect))
def test_fix_sentence_endings(self):
wrapper = TextWrapper(60, fix_sentence_endings=True)
# SF #847346: ensure that fix_sentence_endings=True does the
# right thing even on input short enough that it doesn't need to
# be wrapped.
text = "A short line. Note the single space."
expect = ["A short line. Note the single space."]
self.check(wrapper.wrap(text), expect)
# Test some of the hairy end cases that _fix_sentence_endings()
# is supposed to handle (the easy stuff is tested in
# test_whitespace() above).
text = "Well, Doctor? What do you think?"
expect = ["Well, Doctor? What do you think?"]
self.check(wrapper.wrap(text), expect)
text = "Well, Doctor?\nWhat do you think?"
self.check(wrapper.wrap(text), expect)
text = 'I say, chaps! Anyone for "tennis?"\nHmmph!'
expect = ['I say, chaps! Anyone for "tennis?" Hmmph!']
self.check(wrapper.wrap(text), expect)
wrapper.width = 20
expect = ['I say, chaps!', 'Anyone for "tennis?"', 'Hmmph!']
self.check(wrapper.wrap(text), expect)
text = 'And she said, "Go to hell!"\nCan you believe that?'
expect = ['And she said, "Go to',
'hell!" Can you',
'believe that?']
self.check(wrapper.wrap(text), expect)
wrapper.width = 60
expect = ['And she said, "Go to hell!" Can you believe that?']
self.check(wrapper.wrap(text), expect)
text = 'File stdio.h is nice.'
expect = ['File stdio.h is nice.']
self.check(wrapper.wrap(text), expect)
def test_wrap_short(self):
# Wrapping to make short lines longer
text = "This is a\nshort paragraph."
self.check_wrap(text, 20, ["This is a short",
"paragraph."])
self.check_wrap(text, 40, ["This is a short paragraph."])
def test_wrap_short_1line(self):
# Test endcases
text = "This is a short line."
self.check_wrap(text, 30, ["This is a short line."])
self.check_wrap(text, 30, ["(1) This is a short line."],
initial_indent="(1) ")
def test_hyphenated(self):
# Test breaking hyphenated words
text = ("this-is-a-useful-feature-for-"
"reformatting-posts-from-tim-peters'ly")
self.check_wrap(text, 40,
["this-is-a-useful-feature-for-",
"reformatting-posts-from-tim-peters'ly"])
self.check_wrap(text, 41,
["this-is-a-useful-feature-for-",
"reformatting-posts-from-tim-peters'ly"])
self.check_wrap(text, 42,
["this-is-a-useful-feature-for-reformatting-",
"posts-from-tim-peters'ly"])
def test_hyphenated_numbers(self):
# Test that hyphenated numbers (e.g. dates) are not broken like words.
text = ("Python 1.0.0 was released on 1994-01-26. Python 1.0.1 was\n"
"released on 1994-02-15.")
self.check_wrap(text, 35, ['Python 1.0.0 was released on',
'1994-01-26. Python 1.0.1 was',
'released on 1994-02-15.'])
self.check_wrap(text, 40, ['Python 1.0.0 was released on 1994-01-26.',
'Python 1.0.1 was released on 1994-02-15.'])
text = "I do all my shopping at 7-11."
self.check_wrap(text, 25, ["I do all my shopping at",
"7-11."])
self.check_wrap(text, 27, ["I do all my shopping at",
"7-11."])
self.check_wrap(text, 29, ["I do all my shopping at 7-11."])
def test_em_dash(self):
# Test text with em-dashes
text = "Em-dashes should be written -- thus."
self.check_wrap(text, 25,
["Em-dashes should be",
"written -- thus."])
# Probe the boundaries of the properly written em-dash,
# ie. " -- ".
self.check_wrap(text, 29,
["Em-dashes should be written",
"-- thus."])
expect = ["Em-dashes should be written --",
"thus."]
self.check_wrap(text, 30, expect)
self.check_wrap(text, 35, expect)
self.check_wrap(text, 36,
["Em-dashes should be written -- thus."])
# The improperly written em-dash is handled too, because
# it's adjacent to non-whitespace on both sides.
text = "You can also do--this or even---this."
expect = ["You can also do",
"--this or even",
"---this."]
self.check_wrap(text, 15, expect)
self.check_wrap(text, 16, expect)
expect = ["You can also do--",
"this or even---",
"this."]
self.check_wrap(text, 17, expect)
self.check_wrap(text, 19, expect)
expect = ["You can also do--this or even",
"---this."]
self.check_wrap(text, 29, expect)
self.check_wrap(text, 31, expect)
expect = ["You can also do--this or even---",
"this."]
self.check_wrap(text, 32, expect)
self.check_wrap(text, 35, expect)
# All of the above behaviour could be deduced by probing the
# _split() method.
text = "Here's an -- em-dash and--here's another---and another!"
expect = ["Here's", " ", "an", " ", "--", " ", "em-", "dash", " ",
"and", "--", "here's", " ", "another", "---",
"and", " ", "another!"]
self.check_split(text, expect)
text = "and then--bam!--he was gone"
expect = ["and", " ", "then", "--", "bam!", "--",
"he", " ", "was", " ", "gone"]
self.check_split(text, expect)
def test_unix_options (self):
# Test that Unix-style command-line options are wrapped correctly.
# Both Optik (OptionParser) and Docutils rely on this behaviour!
text = "You should use the -n option, or --dry-run in its long form."
self.check_wrap(text, 20,
["You should use the",
"-n option, or --dry-",
"run in its long",
"form."])
self.check_wrap(text, 21,
["You should use the -n",
"option, or --dry-run",
"in its long form."])
expect = ["You should use the -n option, or",
"--dry-run in its long form."]
self.check_wrap(text, 32, expect)
self.check_wrap(text, 34, expect)
self.check_wrap(text, 35, expect)
self.check_wrap(text, 38, expect)
expect = ["You should use the -n option, or --dry-",
"run in its long form."]
self.check_wrap(text, 39, expect)
self.check_wrap(text, 41, expect)
expect = ["You should use the -n option, or --dry-run",
"in its long form."]
self.check_wrap(text, 42, expect)
# Again, all of the above can be deduced from _split().
text = "the -n option, or --dry-run or --dryrun"
expect = ["the", " ", "-n", " ", "option,", " ", "or", " ",
"--dry-", "run", " ", "or", " ", "--dryrun"]
self.check_split(text, expect)
def test_funky_hyphens (self):
# Screwy edge cases cooked up by David Goodger. All reported
# in SF bug #596434.
self.check_split("what the--hey!", ["what", " ", "the", "--", "hey!"])
self.check_split("what the--", ["what", " ", "the--"])
self.check_split("what the--.", ["what", " ", "the--."])
self.check_split("--text--.", ["--text--."])
# When I first read bug #596434, this is what I thought David
# was talking about. I was wrong; these have always worked
# fine. The real problem is tested in test_funky_parens()
# below...
self.check_split("--option", ["--option"])
self.check_split("--option-opt", ["--option-", "opt"])
self.check_split("foo --option-opt bar",
["foo", " ", "--option-", "opt", " ", "bar"])
def test_punct_hyphens(self):
# Oh bother, SF #965425 found another problem with hyphens --
# hyphenated words in single quotes weren't handled correctly.
# In fact, the bug is that *any* punctuation around a hyphenated
# word was handled incorrectly, except for a leading "--", which
# was special-cased for Optik and Docutils. So test a variety
# of styles of punctuation around a hyphenated word.
# (Actually this is based on an Optik bug report, #813077).
self.check_split("the 'wibble-wobble' widget",
['the', ' ', "'wibble-", "wobble'", ' ', 'widget'])
self.check_split('the "wibble-wobble" widget',
['the', ' ', '"wibble-', 'wobble"', ' ', 'widget'])
self.check_split("the (wibble-wobble) widget",
['the', ' ', "(wibble-", "wobble)", ' ', 'widget'])
self.check_split("the ['wibble-wobble'] widget",
['the', ' ', "['wibble-", "wobble']", ' ', 'widget'])
def test_funky_parens (self):
# Second part of SF bug #596434: long option strings inside
# parentheses.
self.check_split("foo (--option) bar",
["foo", " ", "(--option)", " ", "bar"])
# Related stuff -- make sure parens work in simpler contexts.
self.check_split("foo (bar) baz",
["foo", " ", "(bar)", " ", "baz"])
self.check_split("blah (ding dong), wubba",
["blah", " ", "(ding", " ", "dong),",
" ", "wubba"])
def test_initial_whitespace(self):
# SF bug #622849 reported inconsistent handling of leading
# whitespace; let's test that a bit, shall we?
text = " This is a sentence with leading whitespace."
self.check_wrap(text, 50,
[" This is a sentence with leading whitespace."])
self.check_wrap(text, 30,
[" This is a sentence with", "leading whitespace."])
def test_no_drop_whitespace(self):
# SF patch #1581073
text = " This is a sentence with much whitespace."
self.check_wrap(text, 10,
[" This is a", " ", "sentence ",
"with ", "much white", "space."],
drop_whitespace=False)
if test_support.have_unicode:
def test_unicode(self):
# *Very* simple test of wrapping Unicode strings. I'm sure
# there's more to it than this, but let's at least make
# sure textwrap doesn't crash on Unicode input!
text = u"Hello there, how are you today?"
self.check_wrap(text, 50, [u"Hello there, how are you today?"])
self.check_wrap(text, 20, [u"Hello there, how are", "you today?"])
olines = self.wrapper.wrap(text)
assert isinstance(olines, list) and isinstance(olines[0], unicode)
otext = self.wrapper.fill(text)
assert isinstance(otext, unicode)
def test_no_split_at_umlaut(self):
text = u"Die Empf\xe4nger-Auswahl"
self.check_wrap(text, 13, [u"Die", u"Empf\xe4nger-", u"Auswahl"])
def test_umlaut_followed_by_dash(self):
text = u"aa \xe4\xe4-\xe4\xe4"
self.check_wrap(text, 7, [u"aa \xe4\xe4-", u"\xe4\xe4"])
def test_split(self):
# Ensure that the standard _split() method works as advertised
# in the comments
text = "Hello there -- you goof-ball, use the -b option!"
result = self.wrapper._split(text)
self.check(result,
["Hello", " ", "there", " ", "--", " ", "you", " ", "goof-",
"ball,", " ", "use", " ", "the", " ", "-b", " ", "option!"])
def test_break_on_hyphens(self):
# Ensure that the break_on_hyphens attributes work
text = "yaba daba-doo"
self.check_wrap(text, 10, ["yaba daba-", "doo"],
break_on_hyphens=True)
self.check_wrap(text, 10, ["yaba", "daba-doo"],
break_on_hyphens=False)
def test_bad_width(self):
# Ensure that width <= 0 is caught.
text = "Whatever, it doesn't matter."
self.assertRaises(ValueError, wrap, text, 0)
self.assertRaises(ValueError, wrap, text, -1)
class LongWordTestCase (BaseTestCase):
def setUp(self):
self.wrapper = TextWrapper()
self.text = '''\
Did you say "supercalifragilisticexpialidocious?"
How *do* you spell that odd word, anyways?
'''
def test_break_long(self):
# Wrap text with long words and lots of punctuation
self.check_wrap(self.text, 30,
['Did you say "supercalifragilis',
'ticexpialidocious?" How *do*',
'you spell that odd word,',
'anyways?'])
self.check_wrap(self.text, 50,
['Did you say "supercalifragilisticexpialidocious?"',
'How *do* you spell that odd word, anyways?'])
# SF bug 797650. Prevent an infinite loop by making sure that at
# least one character gets split off on every pass.
self.check_wrap('-'*10+'hello', 10,
['----------',
' h',
' e',
' l',
' l',
' o'],
subsequent_indent = ' '*15)
# bug 1146. Prevent a long word from being wrongly wrapped when the
# preceding word is exactly one character shorter than the width
self.check_wrap(self.text, 12,
['Did you say ',
'"supercalifr',
'agilisticexp',
'ialidocious?',
'" How *do*',
'you spell',
'that odd',
'word,',
'anyways?'])
def test_nobreak_long(self):
# Test with break_long_words disabled
self.wrapper.break_long_words = 0
self.wrapper.width = 30
expect = ['Did you say',
'"supercalifragilisticexpialidocious?"',
'How *do* you spell that odd',
'word, anyways?'
]
result = self.wrapper.wrap(self.text)
self.check(result, expect)
# Same thing with kwargs passed to standalone wrap() function.
result = wrap(self.text, width=30, break_long_words=0)
self.check(result, expect)
class IndentTestCases(BaseTestCase):
# called before each test method
def setUp(self):
self.text = '''\
This paragraph will be filled, first without any indentation,
and then with some (including a hanging indent).'''
def test_fill(self):
# Test the fill() method
expect = '''\
This paragraph will be filled, first
without any indentation, and then with
some (including a hanging indent).'''
result = fill(self.text, 40)
self.check(result, expect)
def test_initial_indent(self):
# Test initial_indent parameter
expect = [" This paragraph will be filled,",
"first without any indentation, and then",
"with some (including a hanging indent)."]
result = wrap(self.text, 40, initial_indent=" ")
self.check(result, expect)
expect = "\n".join(expect)
result = fill(self.text, 40, initial_indent=" ")
self.check(result, expect)
def test_subsequent_indent(self):
# Test subsequent_indent parameter
expect = '''\
* This paragraph will be filled, first
without any indentation, and then
with some (including a hanging
indent).'''
result = fill(self.text, 40,
initial_indent=" * ", subsequent_indent=" ")
self.check(result, expect)
# Despite the similar names, DedentTestCase is *not* the inverse
# of IndentTestCase!
class DedentTestCase(unittest.TestCase):
def assertUnchanged(self, text):
"""assert that dedent() has no effect on 'text'"""
self.assertEquals(text, dedent(text))
def test_dedent_nomargin(self):
# No lines indented.
text = "Hello there.\nHow are you?\nOh good, I'm glad."
self.assertUnchanged(text)
# Similar, with a blank line.
text = "Hello there.\n\nBoo!"
self.assertUnchanged(text)
# Some lines indented, but overall margin is still zero.
text = "Hello there.\n This is indented."
self.assertUnchanged(text)
# Again, add a blank line.
text = "Hello there.\n\n Boo!\n"
self.assertUnchanged(text)
def test_dedent_even(self):
# All lines indented by two spaces.
text = " Hello there.\n How are ya?\n Oh good."
expect = "Hello there.\nHow are ya?\nOh good."
self.assertEquals(expect, dedent(text))
# Same, with blank lines.
text = " Hello there.\n\n How are ya?\n Oh good.\n"
expect = "Hello there.\n\nHow are ya?\nOh good.\n"
self.assertEquals(expect, dedent(text))
# Now indent one of the blank lines.
text = " Hello there.\n \n How are ya?\n Oh good.\n"
expect = "Hello there.\n\nHow are ya?\nOh good.\n"
self.assertEquals(expect, dedent(text))
def test_dedent_uneven(self):
# Lines indented unevenly.
text = '''\
def foo():
while 1:
return foo
'''
expect = '''\
def foo():
while 1:
return foo
'''
self.assertEquals(expect, dedent(text))
# Uneven indentation with a blank line.
text = " Foo\n Bar\n\n Baz\n"
expect = "Foo\n Bar\n\n Baz\n"
self.assertEquals(expect, dedent(text))
# Uneven indentation with a whitespace-only line.
text = " Foo\n Bar\n \n Baz\n"
expect = "Foo\n Bar\n\n Baz\n"
self.assertEquals(expect, dedent(text))
# dedent() should not mangle internal tabs
def test_dedent_preserve_internal_tabs(self):
text = " hello\tthere\n how are\tyou?"
expect = "hello\tthere\nhow are\tyou?"
self.assertEquals(expect, dedent(text))
# make sure that it preserves tabs when it's not making any
# changes at all
self.assertEquals(expect, dedent(expect))
# dedent() should not mangle tabs in the margin (i.e.
# tabs and spaces both count as margin, but are *not*
# considered equivalent)
def test_dedent_preserve_margin_tabs(self):
text = " hello there\n\thow are you?"
self.assertUnchanged(text)
# same effect even if we have 8 spaces
text = " hello there\n\thow are you?"
self.assertUnchanged(text)
# dedent() only removes whitespace that can be uniformly removed!
text = "\thello there\n\thow are you?"
expect = "hello there\nhow are you?"
self.assertEquals(expect, dedent(text))
text = " \thello there\n \thow are you?"
self.assertEquals(expect, dedent(text))
text = " \t hello there\n \t how are you?"
self.assertEquals(expect, dedent(text))
text = " \thello there\n \t how are you?"
expect = "hello there\n how are you?"
self.assertEquals(expect, dedent(text))
def test_main():
test_support.run_unittest(WrapTestCase,
LongWordTestCase,
IndentTestCases,
DedentTestCase)
if __name__ == '__main__':
test_main()
|
apache-2.0
|
louietsai/python-for-android
|
python-modules/twisted/twisted/test/test_hook.py
|
81
|
4290
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for twisted.hook module.
"""
from twisted.python import hook
from twisted.trial import unittest
class BaseClass:
"""
dummy class to help in testing.
"""
def __init__(self):
"""
dummy initializer
"""
self.calledBasePre = 0
self.calledBasePost = 0
self.calledBase = 0
def func(self, a, b):
"""
dummy method
"""
assert a == 1
assert b == 2
self.calledBase = self.calledBase + 1
class SubClass(BaseClass):
"""
another dummy class
"""
def __init__(self):
"""
another dummy initializer
"""
BaseClass.__init__(self)
self.calledSubPre = 0
self.calledSubPost = 0
self.calledSub = 0
def func(self, a, b):
"""
another dummy function
"""
assert a == 1
assert b == 2
BaseClass.func(self, a, b)
self.calledSub = self.calledSub + 1
_clean_BaseClass = BaseClass.__dict__.copy()
_clean_SubClass = SubClass.__dict__.copy()
def basePre(base, a, b):
"""
a pre-hook for the base class
"""
base.calledBasePre = base.calledBasePre + 1
def basePost(base, a, b):
"""
a post-hook for the base class
"""
base.calledBasePost = base.calledBasePost + 1
def subPre(sub, a, b):
"""
a pre-hook for the subclass
"""
sub.calledSubPre = sub.calledSubPre + 1
def subPost(sub, a, b):
"""
a post-hook for the subclass
"""
sub.calledSubPost = sub.calledSubPost + 1
class HookTestCase(unittest.TestCase):
"""
test case to make sure hooks are called
"""
def setUp(self):
"""Make sure we have clean versions of our classes."""
BaseClass.__dict__.clear()
BaseClass.__dict__.update(_clean_BaseClass)
SubClass.__dict__.clear()
SubClass.__dict__.update(_clean_SubClass)
def testBaseHook(self):
"""make sure that the base class's hook is called reliably
"""
base = BaseClass()
self.assertEquals(base.calledBase, 0)
self.assertEquals(base.calledBasePre, 0)
base.func(1,2)
self.assertEquals(base.calledBase, 1)
self.assertEquals(base.calledBasePre, 0)
hook.addPre(BaseClass, "func", basePre)
base.func(1, b=2)
self.assertEquals(base.calledBase, 2)
self.assertEquals(base.calledBasePre, 1)
hook.addPost(BaseClass, "func", basePost)
base.func(1, b=2)
self.assertEquals(base.calledBasePost, 1)
self.assertEquals(base.calledBase, 3)
self.assertEquals(base.calledBasePre, 2)
hook.removePre(BaseClass, "func", basePre)
hook.removePost(BaseClass, "func", basePost)
base.func(1, b=2)
self.assertEquals(base.calledBasePost, 1)
self.assertEquals(base.calledBase, 4)
self.assertEquals(base.calledBasePre, 2)
def testSubHook(self):
"""test interactions between base-class hooks and subclass hooks
"""
sub = SubClass()
self.assertEquals(sub.calledSub, 0)
self.assertEquals(sub.calledBase, 0)
sub.func(1, b=2)
self.assertEquals(sub.calledSub, 1)
self.assertEquals(sub.calledBase, 1)
hook.addPre(SubClass, 'func', subPre)
self.assertEquals(sub.calledSub, 1)
self.assertEquals(sub.calledBase, 1)
self.assertEquals(sub.calledSubPre, 0)
self.assertEquals(sub.calledBasePre, 0)
sub.func(1, b=2)
self.assertEquals(sub.calledSub, 2)
self.assertEquals(sub.calledBase, 2)
self.assertEquals(sub.calledSubPre, 1)
self.assertEquals(sub.calledBasePre, 0)
# let the pain begin
hook.addPre(BaseClass, 'func', basePre)
BaseClass.func(sub, 1, b=2)
# sub.func(1, b=2)
self.assertEquals(sub.calledBase, 3)
self.assertEquals(sub.calledBasePre, 1, str(sub.calledBasePre))
sub.func(1, b=2)
self.assertEquals(sub.calledBasePre, 2)
self.assertEquals(sub.calledBase, 4)
self.assertEquals(sub.calledSubPre, 2)
self.assertEquals(sub.calledSub, 3)
testCases = [HookTestCase]
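# A minimal usage sketch of the hook API exercised above (illustrative only;
# _demo_hooks is not part of the original module):
def _demo_hooks():
    base = BaseClass()
    hook.addPre(BaseClass, "func", basePre)
    hook.addPost(BaseClass, "func", basePost)
    base.func(1, 2)
    assert (base.calledBasePre, base.calledBase, base.calledBasePost) == (1, 1, 1)
    hook.removePre(BaseClass, "func", basePre)
    hook.removePost(BaseClass, "func", basePost)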
|
apache-2.0
|
alexpilotti/stackalytics-1
|
stackalytics/openstack/common/importutils.py
|
19
|
2186
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Import related utilities and helper functions.
"""
import sys
import traceback
def import_class(import_str):
"""Returns a class from a string including module and class."""
mod_str, _sep, class_str = import_str.rpartition('.')
try:
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str)
except (ValueError, AttributeError):
raise ImportError('Class %s cannot be found (%s)' %
(class_str,
traceback.format_exception(*sys.exc_info())))
def import_object(import_str, *args, **kwargs):
"""Import a class and return an instance of it."""
return import_class(import_str)(*args, **kwargs)
def import_object_ns(name_space, import_str, *args, **kwargs):
"""Tries to import object from default namespace.
    Imports a class and returns an instance of it, first trying
    to find the class in the default namespace, then falling back to
    the full path if it is not found there.
"""
import_value = "%s.%s" % (name_space, import_str)
try:
return import_class(import_value)(*args, **kwargs)
except ImportError:
return import_class(import_str)(*args, **kwargs)
def import_module(import_str):
"""Import a module."""
__import__(import_str)
return sys.modules[import_str]
def try_import(import_str, default=None):
"""Try to import a module and if it fails return default."""
try:
return import_module(import_str)
except ImportError:
return default
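# A minimal usage sketch (illustrative; the imported names are from the
# standard library, not OpenStack-specific):
def _demo_importutils():
    cls = import_class('collections.OrderedDict')
    instance = import_object('collections.OrderedDict', [('a', 1)])
    json_mod = try_import('json')
    missing = try_import('not_a_real_module', default=None)
    return cls, instance, json_mod, missing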
|
apache-2.0
|
EmpireProject/Empire
|
lib/common/encryption.py
|
7
|
16803
|
"""
Empire encryption functions.
Includes:
pad() - performs PKCS#7 padding
depad() - performs PKCS#7 depadding
rsa_xml_to_key() - parses a PowerShell RSA xml import and builds a M2Crypto object
rsa_encrypt() - encrypts data using the M2Crypto crypto object
aes_encrypt() - encrypts data using a Cryptography AES object
aes_encrypt_then_hmac() - encrypts and SHA256 HMACs data using a Cryptography AES object
aes_decrypt() - decrypts data using a Cryptography AES object
verify_hmac() - verifies a SHA256 HMAC for a data blob
aes_decrypt_and_verify() - AES decrypts data if the HMAC is validated
generate_aes_key() - generates a random AES key using the OS' random functionality
rc4() - encrypt/decrypt a data blob using an RC4 key
DiffieHellman() - Mark Loiseau's DiffieHellman implementation, see ./data/licenses/ for license info
"""
import base64
import hashlib
import hmac
import os
import string
import M2Crypto
import os
import random
from xml.dom.minidom import parseString
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
from binascii import hexlify
def to_bufferable(binary):
return binary
def _get_byte(c):
return ord(c)
# Python 3 compatibility stuffz
try:
xrange
except Exception:
xrange = range
def to_bufferable(binary):
if isinstance(binary, bytes):
return binary
return bytes(ord(b) for b in binary)
def _get_byte(c):
return c
# Use the SSL secure random number generator if available, otherwise fall
# back to os.urandom.
try:
import ssl
random_function = ssl.RAND_bytes
random_provider = "Python SSL"
except:
random_function = os.urandom
random_provider = "os.urandom"
def pad(data):
"""
Performs PKCS#7 padding for 128 bit block size.
"""
pad = 16 - (len(data) % 16)
return data + to_bufferable(chr(pad) * pad)
# return str(s) + chr(16 - len(str(s)) % 16) * (16 - len(str(s)) % 16)
def depad(data):
"""
Performs PKCS#7 depadding for 128 bit block size.
"""
if len(data) % 16 != 0:
raise ValueError("invalid length")
pad = _get_byte(data[-1])
return data[:-pad]
# return s[:-(ord(s[-1]))]
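# A quick worked example (illustrative): 13 bytes of input get 3 bytes of
# padding, each byte 0x03, so depad(pad(x)) round-trips.
def _demo_pkcs7():
    padded = pad(to_bufferable('hello world!!'))   # 13 bytes -> 16 bytes
    assert len(padded) == 16
    assert depad(padded) == to_bufferable('hello world!!')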
def rsa_xml_to_key(xml):
"""
    Parse a PowerShell RSA.ToXmlString() public key string and
    return an M2Crypto key object.
Used during PowerShell RSA-EKE key exchange in agents.py.
Reference- http://stackoverflow.com/questions/10367072/m2crypto-import-keys-from-non-standard-file
"""
try:
# parse the xml DOM and extract the exponent/modulus
dom = parseString(xml)
e = base64.b64decode(dom.getElementsByTagName('Exponent')[0].childNodes[0].data)
n = base64.b64decode(dom.getElementsByTagName('Modulus')[0].childNodes[0].data)
# build the new key
key = M2Crypto.RSA.new_pub_key((
M2Crypto.m2.bn_to_mpi(M2Crypto.m2.hex_to_bn(hexlify(e))),
M2Crypto.m2.bn_to_mpi(M2Crypto.m2.hex_to_bn(hexlify(n))),
))
return key
# if there's an XML parsing error, return None
except:
return None
def rsa_encrypt(key, data):
"""
Take a M2Crypto key object and use it to encrypt the passed data.
"""
return key.public_encrypt(data, M2Crypto.RSA.pkcs1_padding)
def aes_encrypt(key, data):
"""
Generate a random IV and new AES cipher object with the given
key, and return IV + encryptedData.
"""
backend = default_backend()
IV = os.urandom(16)
cipher = Cipher(algorithms.AES(key), modes.CBC(IV), backend=backend)
encryptor = cipher.encryptor()
ct = encryptor.update(pad(data))+encryptor.finalize()
return IV + ct
def aes_encrypt_then_hmac(key, data):
"""
Encrypt the data then calculate HMAC over the ciphertext.
"""
data = aes_encrypt(key, data)
mac = hmac.new(str(key), data, hashlib.sha256).digest()
return data + mac[0:10]
def aes_decrypt(key, data):
"""
Generate an AES cipher object, pull out the IV from the data
and return the unencrypted data.
"""
if len(data) > 16:
backend = default_backend()
IV = data[0:16]
cipher = Cipher(algorithms.AES(key), modes.CBC(IV), backend=backend)
decryptor = cipher.decryptor()
pt = depad(decryptor.update(data[16:])+decryptor.finalize())
return pt
def verify_hmac(key, data):
"""
Verify the HMAC supplied in the data with the given key.
"""
if len(data) > 20:
mac = data[-10:]
data = data[:-10]
expected = hmac.new(key, data, hashlib.sha256).digest()[0:10]
# Double HMAC to prevent timing attacks. hmac.compare_digest() is
# preferable, but only available since Python 2.7.7.
return hmac.new(str(key), expected).digest() == hmac.new(str(key), mac).digest()
else:
return False
def aes_decrypt_and_verify(key, data):
"""
Decrypt the data, but only if it has a valid MAC.
"""
if len(data) > 32 and verify_hmac(key, data):
return aes_decrypt(key, data[:-10])
raise Exception("Invalid ciphertext received.")
def generate_aes_key():
"""
    Generate a new random 32-character (256-bit) AES key using the OS's secure random functions.
"""
    punctuation = '!#$%&()*+,-./:;<=>?@[\\]^_`{|}~'
    return ''.join(random.sample(string.ascii_letters + string.digits + punctuation, 32))
def rc4(key, data):
"""
RC4 encrypt/decrypt the given data input with the specified key.
From: http://stackoverflow.com/questions/29607753/how-to-decrypt-a-file-that-encrypted-with-rc4-using-python
"""
    S, j, out = list(range(256)), 0, []  # list() so the in-place swaps work on Python 3
# KSA Phase
for i in range(256):
j = (j + S[i] + ord(key[i % len(key)])) % 256
S[i], S[j] = S[j], S[i]
# PRGA Phase
i = j = 0
for char in data:
i = (i + 1) % 256
j = (j + S[i]) % 256
S[i], S[j] = S[j], S[i]
out.append(chr(ord(char) ^ S[(S[i] + S[j]) % 256]))
return ''.join(out)
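# RC4 is symmetric, so applying it twice with the same key restores the
# plaintext. A quick sketch (key and data are made-up examples):
def _demo_rc4():
    ct = rc4('example-key', 'attack at dawn')
    assert rc4('example-key', ct) == 'attack at dawn'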
class DiffieHellman(object):
"""
A reference implementation of the Diffie-Hellman protocol.
By default, this class uses the 6144-bit MODP Group (Group 17) from RFC 3526.
This prime is sufficient to generate an AES 256 key when used with
a 540+ bit exponent.
    Based on Mark Loiseau's implementation at https://github.com/lowazo/pyDHE,
    released under version 3.0 of the GNU General Public License;
    see ./data/licenses/pyDHE_license.txt for license info.
Also used in ./data/agent/stager.py for the Python key-negotiation stager
"""
def __init__(self, generator=2, group=17, keyLength=540):
"""
Generate the public and private keys.
"""
min_keyLength = 180
default_generator = 2
valid_generators = [2, 3, 5, 7]
        # Sanity checks for generator and keyLength
if(generator not in valid_generators):
print("Error: Invalid generator. Using default.")
self.generator = default_generator
else:
self.generator = generator
if(keyLength < min_keyLength):
print("Error: keyLength is too small. Setting to minimum.")
self.keyLength = min_keyLength
else:
self.keyLength = keyLength
self.prime = self.getPrime(group)
self.privateKey = self.genPrivateKey(keyLength)
self.publicKey = self.genPublicKey()
def getPrime(self, group=17):
"""
Given a group number, return a prime.
"""
default_group = 17
primes = {
5: 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF,
14: 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF,
15: 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF,
16: 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF,
17:
0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DCC4024FFFFFFFFFFFFFFFF,
18:
0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E438777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F5683423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD922222E04A4037C0713EB57A81A23F0C73473FC646CEA306B4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A364597E899A0255DC164F31CC50846851DF9AB48195DED7EA1B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F924009438B481C6CD7889A002ED5EE382BC9190DA6FC026E479558E4475677E9AA9E3050E2765694DFC81F56E880B96E7160C980DD98EDD3DFFFFFFFFFFFFFFFFF
}
if group in primes.keys():
return primes[group]
else:
print("Error: No prime with group %i. Using default." % group)
return primes[default_group]
def genRandom(self, bits):
"""
Generate a random number with the specified number of bits
"""
_rand = 0
_bytes = bits // 8 + 8
while(len(bin(_rand))-2 < bits):
try:
# Python 3
_rand = int.from_bytes(random_function(_bytes), byteorder='big')
except:
# Python 2
_rand = int(random_function(_bytes).encode('hex'), 16)
return _rand
def genPrivateKey(self, bits):
"""
Generate a private key using a secure random number generator.
"""
return self.genRandom(bits)
def genPublicKey(self):
"""
Generate a public key X with g**x % p.
"""
return pow(self.generator, self.privateKey, self.prime)
def checkPublicKey(self, otherKey):
"""
Check the other party's public key to make sure it's valid.
Since a safe prime is used, verify that the Legendre symbol == 1
"""
if(otherKey > 2 and otherKey < self.prime - 1):
if(pow(otherKey, (self.prime - 1)//2, self.prime) == 1):
return True
return False
def genSecret(self, privateKey, otherKey):
"""
Check to make sure the public key is valid, then combine it with the
private key to generate a shared secret.
"""
if(self.checkPublicKey(otherKey) is True):
sharedSecret = pow(otherKey, privateKey, self.prime)
return sharedSecret
else:
raise Exception("Invalid public key.")
def genKey(self, otherKey):
"""
Derive the shared secret, then hash it to obtain the shared key.
"""
self.sharedSecret = self.genSecret(self.privateKey, otherKey)
# Convert the shared secret (int) to an array of bytes in network order
# Otherwise hashlib can't hash it.
try:
_sharedSecretBytes = self.sharedSecret.to_bytes(
                (len(bin(self.sharedSecret)) - 2) // 8 + 1, byteorder="big")
except AttributeError:
_sharedSecretBytes = str(self.sharedSecret)
s = hashlib.sha256()
s.update(bytes(_sharedSecretBytes))
self.key = s.digest()
def getKey(self):
"""
Return the shared secret key
"""
return self.key
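# A minimal key-agreement sketch (illustrative): both sides derive the same
# SHA256 key from each other's public values using the default group.
def _demo_diffie_hellman():
    alice = DiffieHellman()
    bob = DiffieHellman()
    alice.genKey(bob.publicKey)
    bob.genKey(alice.publicKey)
    assert alice.getKey() == bob.getKey()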
|
bsd-3-clause
|
thepiper/standoff
|
venv/lib/python2.7/site-packages/pip/_vendor/requests/utils.py
|
618
|
21334
|
# -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import collections
import io
import os
import platform
import re
import sys
import socket
import struct
import warnings
from . import __version__
from . import certs
from .compat import parse_http_list as _parse_list_header
from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
builtin_str, getproxies, proxy_bypass, urlunparse,
basestring)
from .cookies import RequestsCookieJar, cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import InvalidURL
_hush_pyflakes = (RequestsCookieJar,)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
def dict_to_sequence(d):
"""Returns an internal sequence dictionary update."""
if hasattr(d, 'items'):
d = d.items()
return d
def super_len(o):
if hasattr(o, '__len__'):
return len(o)
if hasattr(o, 'len'):
return o.len
if hasattr(o, 'fileno'):
try:
fileno = o.fileno()
except io.UnsupportedOperation:
pass
else:
return os.fstat(fileno).st_size
if hasattr(o, 'getvalue'):
# e.g. BytesIO, cStringIO.StringIO
return len(o.getvalue())
def get_netrc_auth(url):
"""Returns the Requests tuple auth for a given url from netrc."""
try:
from netrc import netrc, NetrcParseError
netrc_path = None
for f in NETRC_FILES:
try:
loc = os.path.expanduser('~/{0}'.format(f))
except KeyError:
# os.path.expanduser can fail when $HOME is undefined and
# getpwuid fails. See http://bugs.python.org/issue20164 &
# https://github.com/kennethreitz/requests/issues/1846
return
if os.path.exists(loc):
netrc_path = loc
break
# Abort early if there isn't one.
if netrc_path is None:
return
ri = urlparse(url)
# Strip port numbers from netloc
host = ri.netloc.split(':')[0]
try:
_netrc = netrc(netrc_path).authenticators(host)
if _netrc:
# Return with login / password
login_i = (0 if _netrc[0] else 1)
return (_netrc[login_i], _netrc[2])
except (NetrcParseError, IOError):
# If there was a parsing error or a permissions issue reading the file,
# we'll just skip netrc auth
pass
# AppEngine hackiness.
except (ImportError, AttributeError):
pass
def guess_filename(obj):
"""Tries to guess the filename of the given object."""
name = getattr(obj, 'name', None)
if (name and isinstance(name, basestring) and name[0] != '<' and
name[-1] != '>'):
return os.path.basename(name)
def from_key_val_list(value):
"""Take an object and test to see if it can be represented as a
    dictionary. If it can be, return an OrderedDict, e.g.,
::
>>> from_key_val_list([('key', 'val')])
OrderedDict([('key', 'val')])
>>> from_key_val_list('string')
ValueError: need more than 1 value to unpack
>>> from_key_val_list({'key': 'val'})
OrderedDict([('key', 'val')])
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
return OrderedDict(value)
def to_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. If it can be, return a list of tuples, e.g.,
::
>>> to_key_val_list([('key', 'val')])
[('key', 'val')]
>>> to_key_val_list({'key': 'val'})
[('key', 'val')]
>>> to_key_val_list('string')
ValueError: cannot encode objects that are not 2-tuples.
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
if isinstance(value, collections.Mapping):
value = value.items()
return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
"""
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
"""
result = {}
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting.
:param value: the header value to unquote.
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
# if this is a filename and the starting characters look like
# a UNC path, then just return the value without quotes. Using the
# replace sequence below on a UNC path has the effect of turning
# the leading double slash into a single slash and then
# _fix_ie_filename() doesn't work correctly. See #458.
if not is_filename or value[:2] != '\\\\':
return value.replace('\\\\', '\\').replace('\\"', '"')
return value
def dict_from_cookiejar(cj):
"""Returns a key/value dictionary from a CookieJar.
:param cj: CookieJar object to extract cookies from.
"""
cookie_dict = {}
for cookie in cj:
cookie_dict[cookie.name] = cookie.value
return cookie_dict
def add_dict_to_cookiejar(cj, cookie_dict):
"""Returns a CookieJar from a key/value dictionary.
:param cj: CookieJar to insert cookies into.
:param cookie_dict: Dict of key/values to insert into CookieJar.
"""
cj2 = cookiejar_from_dict(cookie_dict)
cj.update(cj2)
return cj
def get_encodings_from_content(content):
"""Returns encodings from given content string.
:param content: bytestring to extract encodings from.
"""
warnings.warn((
'In requests 3.0, get_encodings_from_content will be removed. For '
'more information, please see the discussion on issue #2266. (This'
' warning should only appear once.)'),
DeprecationWarning)
charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')
return (charset_re.findall(content) +
pragma_re.findall(content) +
xml_re.findall(content))
def get_encoding_from_headers(headers):
"""Returns encodings from given HTTP Header Dict.
:param headers: dictionary to extract encoding from.
"""
content_type = headers.get('content-type')
if not content_type:
return None
content_type, params = cgi.parse_header(content_type)
if 'charset' in params:
return params['charset'].strip("'\"")
if 'text' in content_type:
return 'ISO-8859-1'
def stream_decode_response_unicode(iterator, r):
"""Stream decodes a iterator."""
if r.encoding is None:
for item in iterator:
yield item
return
decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
for chunk in iterator:
rv = decoder.decode(chunk)
if rv:
yield rv
rv = decoder.decode(b'', final=True)
if rv:
yield rv
def iter_slices(string, slice_length):
"""Iterate over slices of a string."""
pos = 0
while pos < len(string):
yield string[pos:pos + slice_length]
pos += slice_length
def get_unicode_from_response(r):
"""Returns the requested content back in unicode.
:param r: Response object to get unicode content from.
Tried:
1. charset from content-type
2. fall back and replace all unicode characters
"""
warnings.warn((
'In requests 3.0, get_unicode_from_response will be removed. For '
'more information, please see the discussion on issue #2266. (This'
' warning should only appear once.)'),
DeprecationWarning)
tried_encodings = []
# Try charset from content-type
encoding = get_encoding_from_headers(r.headers)
if encoding:
try:
return str(r.content, encoding)
except UnicodeError:
tried_encodings.append(encoding)
# Fall back:
try:
return str(r.content, encoding, errors='replace')
except TypeError:
return r.content
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+ "0123456789-._~")
def unquote_unreserved(uri):
"""Un-escape any percent-escape sequences in a URI that are unreserved
characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
"""
parts = uri.split('%')
for i in range(1, len(parts)):
h = parts[i][0:2]
if len(h) == 2 and h.isalnum():
try:
c = chr(int(h, 16))
except ValueError:
raise InvalidURL("Invalid percent-escape sequence: '%s'" % h)
if c in UNRESERVED_SET:
parts[i] = c + parts[i][2:]
else:
parts[i] = '%' + parts[i]
else:
parts[i] = '%' + parts[i]
return ''.join(parts)
def requote_uri(uri):
"""Re-quote the given URI.
This function passes the given URI through an unquote/quote cycle to
ensure that it is fully and consistently quoted.
"""
safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
safe_without_percent = "!#$&'()*+,/:;=?@[]~"
try:
# Unquote only the unreserved characters
# Then quote only illegal characters (do not quote reserved,
# unreserved, or '%')
return quote(unquote_unreserved(uri), safe=safe_with_percent)
except InvalidURL:
# We couldn't unquote the given URI, so let's try quoting it, but
# there may be unquoted '%'s in the URI. We need to make sure they're
# properly quoted so they do not cause issues elsewhere.
return quote(uri, safe=safe_without_percent)
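# A quick illustration (URLs are made up): percent-escapes of unreserved
# characters are collapsed, while illegal characters get quoted.
def _demo_requote():
    assert unquote_unreserved('http://example.com/%7Euser') == 'http://example.com/~user'
    assert requote_uri('http://example.com/a b') == 'http://example.com/a%20b'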
def address_in_network(ip, net):
"""
    This function allows you to check if an IP belongs to a network subnet
Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
"""
ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
netaddr, bits = net.split('/')
netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask
return (ipaddr & netmask) == (network & netmask)
def dotted_netmask(mask):
"""
Converts mask from /xx format to xxx.xxx.xxx.xxx
Example: if mask is 24 function returns 255.255.255.0
"""
bits = 0xffffffff ^ (1 << 32 - mask) - 1
return socket.inet_ntoa(struct.pack('>I', bits))
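# A worked example (made-up addresses): /24 expands to 255.255.255.0, and
# 192.168.1.1 lies inside 192.168.1.0/24 but not 192.168.100.0/24.
def _demo_netmask():
    assert dotted_netmask(24) == '255.255.255.0'
    assert address_in_network('192.168.1.1', '192.168.1.0/24')
    assert not address_in_network('192.168.1.1', '192.168.100.0/24')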
def is_ipv4_address(string_ip):
try:
socket.inet_aton(string_ip)
except socket.error:
return False
return True
def is_valid_cidr(string_network):
"""Very simple check of the cidr format in no_proxy variable"""
if string_network.count('/') == 1:
try:
mask = int(string_network.split('/')[1])
except ValueError:
return False
if mask < 1 or mask > 32:
return False
try:
socket.inet_aton(string_network.split('/')[0])
except socket.error:
return False
else:
return False
return True
def should_bypass_proxies(url):
"""
Returns whether we should bypass proxies or not.
"""
get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())
# First check whether no_proxy is defined. If it is, check that the URL
# we're getting isn't in the no_proxy list.
no_proxy = get_proxy('no_proxy')
netloc = urlparse(url).netloc
if no_proxy:
# We need to check whether we match here. We need to see if we match
# the end of the netloc, both with and without the port.
no_proxy = no_proxy.replace(' ', '').split(',')
ip = netloc.split(':')[0]
if is_ipv4_address(ip):
for proxy_ip in no_proxy:
if is_valid_cidr(proxy_ip):
if address_in_network(ip, proxy_ip):
return True
else:
for host in no_proxy:
if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
# The URL does match something in no_proxy, so we don't want
# to apply the proxies on this URL.
return True
# If the system proxy settings indicate that this URL should be bypassed,
# don't proxy.
# The proxy_bypass function is incredibly buggy on OS X in early versions
# of Python 2.6, so allow this call to fail. Only catch the specific
# exceptions we've seen, though: this call failing in other ways can reveal
# legitimate problems.
try:
bypass = proxy_bypass(netloc)
except (TypeError, socket.gaierror):
bypass = False
if bypass:
return True
return False
def get_environ_proxies(url):
"""Return a dict of environment proxies."""
if should_bypass_proxies(url):
return {}
else:
return getproxies()
def default_user_agent(name="python-requests"):
"""Return a string representing the default user agent."""
_implementation = platform.python_implementation()
if _implementation == 'CPython':
_implementation_version = platform.python_version()
elif _implementation == 'PyPy':
_implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
_implementation_version = ''.join([_implementation_version, sys.pypy_version_info.releaselevel])
elif _implementation == 'Jython':
_implementation_version = platform.python_version() # Complete Guess
elif _implementation == 'IronPython':
_implementation_version = platform.python_version() # Complete Guess
else:
_implementation_version = 'Unknown'
try:
p_system = platform.system()
p_release = platform.release()
except IOError:
p_system = 'Unknown'
p_release = 'Unknown'
return " ".join(['%s/%s' % (name, __version__),
'%s/%s' % (_implementation, _implementation_version),
'%s/%s' % (p_system, p_release)])
def default_headers():
return CaseInsensitiveDict({
'User-Agent': default_user_agent(),
'Accept-Encoding': ', '.join(('gzip', 'deflate')),
'Accept': '*/*',
'Connection': 'keep-alive',
})
def parse_header_links(value):
"""Return a dict of parsed link headers proxies.
i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
"""
links = []
replace_chars = " '\""
for val in re.split(", *<", value):
try:
url, params = val.split(";", 1)
except ValueError:
url, params = val, ''
link = {}
link["url"] = url.strip("<> '\"")
for param in params.split(";"):
try:
key, value = param.split("=")
except ValueError:
break
link[key.strip(replace_chars)] = value.strip(replace_chars)
links.append(link)
return links
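# A quick illustration with a two-link header (URLs are made up):
def _demo_parse_header_links():
    links = parse_header_links(
        '<http://example.com/front.jpeg>; rel=front; type="image/jpeg", '
        '<http://example.com/back.jpeg>; rel=back')
    assert links[0] == {'url': 'http://example.com/front.jpeg',
                        'rel': 'front', 'type': 'image/jpeg'}
    assert links[1]['rel'] == 'back'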
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii') # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3
def guess_json_utf(data):
# JSON always starts with two ASCII characters, so detection is as
# easy as counting the nulls and from their location and count
# determine the encoding. Also detect a BOM, if present.
sample = data[:4]
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):  # BOM32_BE is a legacy UTF-16 alias, not UTF-32
return 'utf-32' # BOM included
if sample[:3] == codecs.BOM_UTF8:
return 'utf-8-sig' # BOM included, MS style (discouraged)
if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
return 'utf-16' # BOM included
nullcount = sample.count(_null)
if nullcount == 0:
return 'utf-8'
if nullcount == 2:
if sample[::2] == _null2: # 1st and 3rd are null
return 'utf-16-be'
if sample[1::2] == _null2: # 2nd and 4th are null
return 'utf-16-le'
# Did not detect 2 valid UTF-16 ascii-range characters
if nullcount == 3:
if sample[:3] == _null3:
return 'utf-32-be'
if sample[1:] == _null3:
return 'utf-32-le'
# Did not detect a valid UTF-32 ascii-range character
return None
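# A quick illustration (payload is made up; BOM-less encodings are detected
# purely from the null-byte pattern of the first four bytes):
def _demo_guess_json_utf():
    assert guess_json_utf('{"a": 1}'.encode('utf-8')) == 'utf-8'
    assert guess_json_utf('{"a": 1}'.encode('utf-16-le')) == 'utf-16-le'
    assert guess_json_utf('{"a": 1}'.encode('utf-32-be')) == 'utf-32-be'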
def prepend_scheme_if_needed(url, new_scheme):
'''Given a URL that may or may not have a scheme, prepend the given scheme.
Does not replace a present scheme with the one provided as an argument.'''
scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme)
# urlparse is a finicky beast, and sometimes decides that there isn't a
# netloc present. Assume that it's being over-cautious, and switch netloc
# and path if urlparse decided there was no netloc.
if not netloc:
netloc, path = path, netloc
return urlunparse((scheme, netloc, path, params, query, fragment))
def get_auth_from_url(url):
"""Given a url with authentication components, extract them into a tuple of
username,password."""
parsed = urlparse(url)
try:
auth = (unquote(parsed.username), unquote(parsed.password))
except (AttributeError, TypeError):
auth = ('', '')
return auth
def to_native_string(string, encoding='ascii'):
"""
Given a string object, regardless of type, returns a representation of that
string in the native string type, encoding and decoding where necessary.
This assumes ASCII unless told otherwise.
"""
out = None
if isinstance(string, builtin_str):
out = string
else:
if is_py2:
out = string.encode(encoding)
else:
out = string.decode(encoding)
return out
def urldefragauth(url):
"""
Given a url remove the fragment and the authentication part
"""
scheme, netloc, path, params, query, fragment = urlparse(url)
# see func:`prepend_scheme_if_needed`
if not netloc:
netloc, path = path, netloc
netloc = netloc.rsplit('@', 1)[-1]
return urlunparse((scheme, netloc, path, params, query, ''))
|
gpl-3.0
|
mathLab/RBniCS
|
rbnics/problems/elliptic/elliptic_coercive_compliant_rb_reduced_problem.py
|
1
|
1383
|
# Copyright (C) 2015-2021 by the RBniCS authors
#
# This file is part of RBniCS.
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from math import sqrt
from numpy import isclose
from rbnics.problems.elliptic.elliptic_coercive_compliant_problem import EllipticCoerciveCompliantProblem
from rbnics.problems.elliptic.elliptic_coercive_compliant_reduced_problem import EllipticCoerciveCompliantReducedProblem
from rbnics.problems.elliptic.elliptic_coercive_rb_reduced_problem import EllipticCoerciveRBReducedProblem
from rbnics.reduction_methods.elliptic import EllipticRBReduction
from rbnics.utils.decorators import ReducedProblemFor
EllipticCoerciveCompliantRBReducedProblem_Base = EllipticCoerciveCompliantReducedProblem(
EllipticCoerciveRBReducedProblem)
@ReducedProblemFor(EllipticCoerciveCompliantProblem, EllipticRBReduction)
class EllipticCoerciveCompliantRBReducedProblem(EllipticCoerciveCompliantRBReducedProblem_Base):
# Return an error bound for the current solution
def estimate_error(self):
eps2 = self.get_residual_norm_squared()
beta = self.truth_problem.get_stability_factor_lower_bound()
assert eps2 >= 0. or isclose(eps2, 0.)
assert beta >= 0.
return sqrt(abs(eps2) / beta)
# Return an error bound for the current compliant output
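    # (for a compliant problem the output error satisfies
    # s(mu) - s_N(mu) = |||e(mu)|||^2 <= Delta_N(mu)^2, a standard RB result
    # stated here for clarity, hence the square of the error bound below)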
def estimate_error_output(self):
return self.estimate_error()**2
|
lgpl-3.0
|
Buggaarde/youtube-dl
|
youtube_dl/extractor/livestream.py
|
107
|
10261
|
from __future__ import unicode_literals
import re
import json
import itertools
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse_urlparse,
compat_urlparse,
)
from ..utils import (
ExtractorError,
find_xpath_attr,
int_or_none,
orderedSet,
xpath_with_ns,
)
class LivestreamIE(InfoExtractor):
IE_NAME = 'livestream'
_VALID_URL = r'https?://(?:new\.)?livestream\.com/.*?/(?P<event_name>.*?)(/videos/(?P<id>[0-9]+)(?:/player)?)?/?(?:$|[?#])'
_TESTS = [{
'url': 'http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370',
'md5': '53274c76ba7754fb0e8d072716f2292b',
'info_dict': {
'id': '4719370',
'ext': 'mp4',
'title': 'Live from Webster Hall NYC',
'upload_date': '20121012',
'like_count': int,
'view_count': int,
'thumbnail': 're:^http://.*\.jpg$'
}
}, {
'url': 'http://new.livestream.com/tedx/cityenglish',
'info_dict': {
'title': 'TEDCity2.0 (English)',
'id': '2245590',
},
'playlist_mincount': 4,
}, {
'url': 'http://new.livestream.com/chess24/tatasteelchess',
'info_dict': {
'title': 'Tata Steel Chess',
'id': '3705884',
},
'playlist_mincount': 60,
}, {
'url': 'https://new.livestream.com/accounts/362/events/3557232/videos/67864563/player?autoPlay=false&height=360&mute=false&width=640',
'only_matching': True,
}, {
'url': 'http://livestream.com/bsww/concacafbeachsoccercampeonato2015',
'only_matching': True,
}]
def _parse_smil(self, video_id, smil_url):
formats = []
_SWITCH_XPATH = (
'.//{http://www.w3.org/2001/SMIL20/Language}body/'
'{http://www.w3.org/2001/SMIL20/Language}switch')
smil_doc = self._download_xml(
smil_url, video_id,
note='Downloading SMIL information',
errnote='Unable to download SMIL information',
fatal=False)
if smil_doc is False: # Download failed
return formats
title_node = find_xpath_attr(
smil_doc, './/{http://www.w3.org/2001/SMIL20/Language}meta',
'name', 'title')
if title_node is None:
self.report_warning('Cannot find SMIL id')
switch_node = smil_doc.find(_SWITCH_XPATH)
else:
title_id = title_node.attrib['content']
switch_node = find_xpath_attr(
smil_doc, _SWITCH_XPATH, 'id', title_id)
if switch_node is None:
raise ExtractorError('Cannot find switch node')
video_nodes = switch_node.findall(
'{http://www.w3.org/2001/SMIL20/Language}video')
for vn in video_nodes:
tbr = int_or_none(vn.attrib.get('system-bitrate'))
furl = (
'http://livestream-f.akamaihd.net/%s?v=3.0.3&fp=WIN%%2014,0,0,145' %
(vn.attrib['src']))
if 'clipBegin' in vn.attrib:
furl += '&ssek=' + vn.attrib['clipBegin']
formats.append({
'url': furl,
'format_id': 'smil_%d' % tbr,
'ext': 'flv',
'tbr': tbr,
'preference': -1000,
})
return formats
def _extract_video_info(self, video_data):
video_id = compat_str(video_data['id'])
FORMAT_KEYS = (
('sd', 'progressive_url'),
('hd', 'progressive_url_hd'),
)
formats = [{
'format_id': format_id,
'url': video_data[key],
'quality': i + 1,
} for i, (format_id, key) in enumerate(FORMAT_KEYS)
if video_data.get(key)]
smil_url = video_data.get('smil_url')
if smil_url:
formats.extend(self._parse_smil(video_id, smil_url))
self._sort_formats(formats)
return {
'id': video_id,
'formats': formats,
'title': video_data['caption'],
'thumbnail': video_data.get('thumbnail_url'),
'upload_date': video_data['updated_at'].replace('-', '')[:8],
'like_count': video_data.get('likes', {}).get('total'),
'view_count': video_data.get('views'),
}
def _extract_event(self, info):
event_id = compat_str(info['id'])
account = compat_str(info['owner_account_id'])
root_url = (
'https://new.livestream.com/api/accounts/{account}/events/{event}/'
'feed.json'.format(account=account, event=event_id))
def _extract_videos():
last_video = None
for i in itertools.count(1):
if last_video is None:
info_url = root_url
else:
info_url = '{root}?&id={id}&newer=-1&type=video'.format(
root=root_url, id=last_video)
videos_info = self._download_json(info_url, event_id, 'Downloading page {0}'.format(i))['data']
videos_info = [v['data'] for v in videos_info if v['type'] == 'video']
if not videos_info:
break
for v in videos_info:
yield self._extract_video_info(v)
last_video = videos_info[-1]['id']
return self.playlist_result(_extract_videos(), event_id, info['full_name'])
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
event_name = mobj.group('event_name')
webpage = self._download_webpage(url, video_id or event_name)
og_video = self._og_search_video_url(
webpage, 'player url', fatal=False, default=None)
if og_video is not None:
query_str = compat_urllib_parse_urlparse(og_video).query
query = compat_urlparse.parse_qs(query_str)
if 'play_url' in query:
api_url = query['play_url'][0].replace('.smil', '')
info = json.loads(self._download_webpage(
api_url, video_id, 'Downloading video info'))
return self._extract_video_info(info)
config_json = self._search_regex(
r'window.config = ({.*?});', webpage, 'window config')
info = json.loads(config_json)['event']
def is_relevant(vdata, vid):
result = vdata['type'] == 'video'
if video_id is not None:
result = result and compat_str(vdata['data']['id']) == vid
return result
if video_id is None:
# This is an event page:
return self._extract_event(info)
else:
videos = [self._extract_video_info(video_data['data'])
for video_data in info['feed']['data']
if is_relevant(video_data, video_id)]
if not videos:
raise ExtractorError('Cannot find video %s' % video_id)
return videos[0]
# The original version of Livestream uses a different system
class LivestreamOriginalIE(InfoExtractor):
IE_NAME = 'livestream:original'
_VALID_URL = r'''(?x)https?://original\.livestream\.com/
(?P<user>[^/]+)/(?P<type>video|folder)
(?:\?.*?Id=|/)(?P<id>.*?)(&|$)
'''
_TESTS = [{
'url': 'http://original.livestream.com/dealbook/video?clipId=pla_8aa4a3f1-ba15-46a4-893b-902210e138fb',
'info_dict': {
'id': 'pla_8aa4a3f1-ba15-46a4-893b-902210e138fb',
'ext': 'mp4',
'title': 'Spark 1 (BitCoin) with Cameron Winklevoss & Tyler Winklevoss of Winklevoss Capital',
},
}, {
'url': 'https://original.livestream.com/newplay/folder?dirId=a07bf706-d0e4-4e75-a747-b021d84f2fd3',
'info_dict': {
'id': 'a07bf706-d0e4-4e75-a747-b021d84f2fd3',
},
'playlist_mincount': 4,
}]
def _extract_video(self, user, video_id):
api_url = 'http://x{0}x.api.channel.livestream.com/2.0/clipdetails?extendedInfo=true&id={1}'.format(user, video_id)
info = self._download_xml(api_url, video_id)
# this url is used on mobile devices
stream_url = 'http://x{0}x.api.channel.livestream.com/3.0/getstream.json?id={1}'.format(user, video_id)
stream_info = self._download_json(stream_url, video_id)
item = info.find('channel').find('item')
ns = {'media': 'http://search.yahoo.com/mrss'}
thumbnail_url = item.find(xpath_with_ns('media:thumbnail', ns)).attrib['url']
return {
'id': video_id,
'title': item.find('title').text,
'url': stream_info['progressiveUrl'],
'thumbnail': thumbnail_url,
}
def _extract_folder(self, url, folder_id):
webpage = self._download_webpage(url, folder_id)
paths = orderedSet(re.findall(
r'''(?x)(?:
<li\s+class="folder">\s*<a\s+href="|
<a\s+href="(?=https?://livestre\.am/)
)([^"]+)"''', webpage))
return {
'_type': 'playlist',
'id': folder_id,
'entries': [{
'_type': 'url',
'url': compat_urlparse.urljoin(url, p),
} for p in paths],
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
id = mobj.group('id')
user = mobj.group('user')
url_type = mobj.group('type')
if url_type == 'folder':
return self._extract_folder(url, id)
else:
return self._extract_video(user, id)
# The server doesn't support HEAD request, the generic extractor can't detect
# the redirection
class LivestreamShortenerIE(InfoExtractor):
IE_NAME = 'livestream:shortener'
IE_DESC = False # Do not list
_VALID_URL = r'https?://livestre\.am/(?P<id>.+)'
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
id = mobj.group('id')
webpage = self._download_webpage(url, id)
return {
'_type': 'url',
'url': self._og_search_url(webpage),
}
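# A quick illustration of the URL patterns above (made-up IDs, no network
# access; _demo_valid_urls is not part of the original extractor):
def _demo_valid_urls():
    assert re.match(LivestreamIE._VALID_URL,
                    'http://new.livestream.com/acc/event/videos/4719370')
    assert re.match(LivestreamOriginalIE._VALID_URL,
                    'http://original.livestream.com/user/video?clipId=pla_abc')
    assert re.match(LivestreamShortenerIE._VALID_URL, 'http://livestre.am/abc')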
|
unlicense
|
ActionAdam/osmc
|
package/mediacenter-skin-osmc/files/usr/share/kodi/addons/script.module.unidecode/lib/unidecode/x0a4.py
|
252
|
4437
|
data = (
'qiet', # 0x00
'qiex', # 0x01
'qie', # 0x02
'qiep', # 0x03
'quot', # 0x04
'quox', # 0x05
'quo', # 0x06
'quop', # 0x07
'qot', # 0x08
'qox', # 0x09
'qo', # 0x0a
'qop', # 0x0b
'qut', # 0x0c
'qux', # 0x0d
'qu', # 0x0e
'qup', # 0x0f
'qurx', # 0x10
'qur', # 0x11
'qyt', # 0x12
'qyx', # 0x13
'qy', # 0x14
'qyp', # 0x15
'qyrx', # 0x16
'qyr', # 0x17
'jjit', # 0x18
'jjix', # 0x19
'jji', # 0x1a
'jjip', # 0x1b
'jjiet', # 0x1c
'jjiex', # 0x1d
'jjie', # 0x1e
'jjiep', # 0x1f
'jjuox', # 0x20
'jjuo', # 0x21
'jjuop', # 0x22
'jjot', # 0x23
'jjox', # 0x24
'jjo', # 0x25
'jjop', # 0x26
'jjut', # 0x27
'jjux', # 0x28
'jju', # 0x29
'jjup', # 0x2a
'jjurx', # 0x2b
'jjur', # 0x2c
'jjyt', # 0x2d
'jjyx', # 0x2e
'jjy', # 0x2f
'jjyp', # 0x30
'njit', # 0x31
'njix', # 0x32
'nji', # 0x33
'njip', # 0x34
'njiet', # 0x35
'njiex', # 0x36
'njie', # 0x37
'njiep', # 0x38
'njuox', # 0x39
'njuo', # 0x3a
'njot', # 0x3b
'njox', # 0x3c
'njo', # 0x3d
'njop', # 0x3e
'njux', # 0x3f
'nju', # 0x40
'njup', # 0x41
'njurx', # 0x42
'njur', # 0x43
'njyt', # 0x44
'njyx', # 0x45
'njy', # 0x46
'njyp', # 0x47
'njyrx', # 0x48
'njyr', # 0x49
'nyit', # 0x4a
'nyix', # 0x4b
'nyi', # 0x4c
'nyip', # 0x4d
'nyiet', # 0x4e
'nyiex', # 0x4f
'nyie', # 0x50
'nyiep', # 0x51
'nyuox', # 0x52
'nyuo', # 0x53
'nyuop', # 0x54
'nyot', # 0x55
'nyox', # 0x56
'nyo', # 0x57
'nyop', # 0x58
'nyut', # 0x59
'nyux', # 0x5a
'nyu', # 0x5b
'nyup', # 0x5c
'xit', # 0x5d
'xix', # 0x5e
'xi', # 0x5f
'xip', # 0x60
'xiet', # 0x61
'xiex', # 0x62
'xie', # 0x63
'xiep', # 0x64
'xuox', # 0x65
'xuo', # 0x66
'xot', # 0x67
'xox', # 0x68
'xo', # 0x69
'xop', # 0x6a
'xyt', # 0x6b
'xyx', # 0x6c
'xy', # 0x6d
'xyp', # 0x6e
'xyrx', # 0x6f
'xyr', # 0x70
'yit', # 0x71
'yix', # 0x72
'yi', # 0x73
'yip', # 0x74
'yiet', # 0x75
'yiex', # 0x76
'yie', # 0x77
'yiep', # 0x78
'yuot', # 0x79
'yuox', # 0x7a
'yuo', # 0x7b
'yuop', # 0x7c
'yot', # 0x7d
'yox', # 0x7e
'yo', # 0x7f
'yop', # 0x80
'yut', # 0x81
'yux', # 0x82
'yu', # 0x83
'yup', # 0x84
'yurx', # 0x85
'yur', # 0x86
'yyt', # 0x87
'yyx', # 0x88
'yy', # 0x89
'yyp', # 0x8a
'yyrx', # 0x8b
'yyr', # 0x8c
'[?]', # 0x8d
'[?]', # 0x8e
'[?]', # 0x8f
'Qot', # 0x90
'Li', # 0x91
'Kit', # 0x92
'Nyip', # 0x93
'Cyp', # 0x94
'Ssi', # 0x95
'Ggop', # 0x96
'Gep', # 0x97
'Mi', # 0x98
'Hxit', # 0x99
'Lyr', # 0x9a
'Bbut', # 0x9b
'Mop', # 0x9c
'Yo', # 0x9d
'Put', # 0x9e
'Hxuo', # 0x9f
'Tat', # 0xa0
'Ga', # 0xa1
'[?]', # 0xa2
'[?]', # 0xa3
'Ddur', # 0xa4
'Bur', # 0xa5
'Gguo', # 0xa6
'Nyop', # 0xa7
'Tu', # 0xa8
'Op', # 0xa9
'Jjut', # 0xaa
'Zot', # 0xab
'Pyt', # 0xac
'Hmo', # 0xad
'Yit', # 0xae
'Vur', # 0xaf
'Shy', # 0xb0
'Vep', # 0xb1
'Za', # 0xb2
'Jo', # 0xb3
'[?]', # 0xb4
'Jjy', # 0xb5
'Got', # 0xb6
'Jjie', # 0xb7
'Wo', # 0xb8
'Du', # 0xb9
'Shur', # 0xba
'Lie', # 0xbb
'Cy', # 0xbc
'Cuop', # 0xbd
'Cip', # 0xbe
'Hxop', # 0xbf
'Shat', # 0xc0
'[?]', # 0xc1
'Shop', # 0xc2
'Che', # 0xc3
'Zziet', # 0xc4
'[?]', # 0xc5
'Ke', # 0xc6
'[?]', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
|
gpl-2.0
|