| prompt (large_string, lengths 72–9.34k) | completion (large_string, lengths 0–7.61k) |
|---|---|
<|file_name|>sdb_dump_patch.py<|end_file_name|><|fim▁begin|>import sys
import logging
import hexdump
import vstruct
import vivisect
import envi
import envi.archs.i386 as x86
import envi.archs.amd64 as x64
import sdb
from sdb import SDB_TAGS
from sdb_dump_common import SdbIndex
from sdb_dump_common import item_get_child
from sdb_dump_common import item_get_children
logging.basicConfig(level=logging.DEBUG)
g_logger = logging.getLogger("sdb_dump_patch")
g_logger.setLevel(logging.DEBUG)
ARCH_32 = "32"
ARCH_64 = "64"
def disassemble(buf, base=0, arch=ARCH_32):
<|fim_middle|>
class GreedyVArray(vstruct.VArray):
def __init__(self, C):
vstruct.VArray.__init__(self)
self._C = C
def vsParse(self, bytez, offset=0, fast=False):
soffset = offset
while offset < len(bytez):
c = self._C()
try:
offset = c.vsParse(bytez, offset=offset, fast=False)
except:
break
self.vsAddElement(c)
return offset
def vsParseFd(self, fd):
raise NotImplementedError()
def dump_patch(bits, arch=ARCH_32):
ps = GreedyVArray(sdb.PATCHBITS)
ps.vsParse(bits.value.value)
for i, _ in ps:
p = ps[int(i)]
print(" opcode: %s" % str(p["opcode"]))
print(" module name: %s" % p.module_name)
print(" rva: 0x%08x" % p.rva)
print(" unk: 0x%08x" % p.unknown)
print(" payload:")
print(hexdump.hexdump(str(p.pattern), result="return"))
print(" disassembly:")
for l in disassemble(str(p.pattern), p.rva, arch=arch):
print(" " + l)
print("")
def _main(sdb_path, patch_name):
from sdb import SDB
with open(sdb_path, "rb") as f:
buf = f.read()
g_logger.debug("loading database")
s = SDB()
s.vsParse(bytearray(buf))
g_logger.debug("done loading database")
index = SdbIndex()
g_logger.debug("indexing strings")
index.index_sdb(s)
g_logger.debug("done indexing strings")
try:
library = item_get_child(s.database_root, SDB_TAGS.TAG_LIBRARY)
except KeyError:
pass
else:
for shim_ref in item_get_children(library, SDB_TAGS.TAG_SHIM_REF):
patch = item_get_child(shim_ref, SDB_TAGS.TAG_PATCH)
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name != patch_name:
continue
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
try:
patch = item_get_child(s.database_root, SDB_TAGS.TAG_PATCH)
except KeyError:
pass
else:
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name == patch_name:
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
def main():
import sys
return sys.exit(_main(*sys.argv[1:]))
if __name__ == "__main__":
main()
<|fim▁end|> | if arch == ARCH_32:
d = x86.i386Disasm()
elif arch == ARCH_64:
d = x64.Amd64Disasm()
else:
raise RuntimeError('unknown arch: ' + str(arch))
offset = 0
while True:
if offset >= len(buf):
break
o = d.disasm(buf, offset, base)
yield "0x%x: %s" % (base + offset, str(o))
offset += o.size |
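
Each row pairs a fill-in-the-middle prompt with its ground-truth completion: the prompt is the source file with one span cut out and replaced by `<|fim_middle|>`, and the completion is the cut span. Below is a minimal sketch of how a consumer might splice a row back into the original file; it assumes nothing beyond the three marker tokens visible in the rows above and plain Python string handling.

```python
# Sketch: rebuild the original source file from one (prompt, completion) row.
# The marker tokens are copied verbatim from the rows above.
FIM_BEGIN = "<|fim▁begin|>"
FIM_MIDDLE = "<|fim_middle|>"
FIM_END = "<|fim▁end|>"

def reconstruct(prompt: str, completion: str) -> str:
    """Splice the completion back into the hole left in the prompt."""
    # Drop the <|file_name|>...<|end_file_name|> header preceding the FIM body.
    body = prompt.split(FIM_BEGIN, 1)[1]
    # Everything before the hole, then everything after it up to the end marker.
    prefix, rest = body.split(FIM_MIDDLE, 1)
    suffix = rest.split(FIM_END, 1)[0]
    return prefix + completion + suffix
```

Applied to the first row above, `reconstruct` yields the complete `sdb_dump_patch.py` listing; every subsequent row reconstructs the same file with a different span masked.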
<|file_name|>sdb_dump_patch.py<|end_file_name|><|fim▁begin|>import sys
import logging
import hexdump
import vstruct
import vivisect
import envi
import envi.archs.i386 as x86
import envi.archs.amd64 as x64
import sdb
from sdb import SDB_TAGS
from sdb_dump_common import SdbIndex
from sdb_dump_common import item_get_child
from sdb_dump_common import item_get_children
logging.basicConfig(level=logging.DEBUG)
g_logger = logging.getLogger("sdb_dump_patch")
g_logger.setLevel(logging.DEBUG)
ARCH_32 = "32"
ARCH_64 = "64"
def disassemble(buf, base=0, arch=ARCH_32):
if arch == ARCH_32:
d = x86.i386Disasm()
elif arch == ARCH_64:
d = x64.Amd64Disasm()
else:
raise RuntimeError('unknown arch: ' + str(arch))
offset = 0
while True:
if offset >= len(buf):
break
o = d.disasm(buf, offset, base)
yield "0x%x: %s" % (base + offset, str(o))
offset += o.size
class GreedyVArray(vstruct.VArray):
<|fim_middle|>
def dump_patch(bits, arch=ARCH_32):
ps = GreedyVArray(sdb.PATCHBITS)
ps.vsParse(bits.value.value)
for i, _ in ps:
p = ps[int(i)]
print(" opcode: %s" % str(p["opcode"]))
print(" module name: %s" % p.module_name)
print(" rva: 0x%08x" % p.rva)
print(" unk: 0x%08x" % p.unknown)
print(" payload:")
print(hexdump.hexdump(str(p.pattern), result="return"))
print(" disassembly:")
for l in disassemble(str(p.pattern), p.rva, arch=arch):
print(" " + l)
print("")
def _main(sdb_path, patch_name):
from sdb import SDB
with open(sdb_path, "rb") as f:
buf = f.read()
g_logger.debug("loading database")
s = SDB()
s.vsParse(bytearray(buf))
g_logger.debug("done loading database")
index = SdbIndex()
g_logger.debug("indexing strings")
index.index_sdb(s)
g_logger.debug("done indexing strings")
try:
library = item_get_child(s.database_root, SDB_TAGS.TAG_LIBRARY)
except KeyError:
pass
else:
for shim_ref in item_get_children(library, SDB_TAGS.TAG_SHIM_REF):
patch = item_get_child(shim_ref, SDB_TAGS.TAG_PATCH)
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name != patch_name:
continue
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
try:
patch = item_get_child(s.database_root, SDB_TAGS.TAG_PATCH)
except KeyError:
pass
else:
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name == patch_name:
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
def main():
import sys
return sys.exit(_main(*sys.argv[1:]))
if __name__ == "__main__":
main()
<|fim▁end|> | def __init__(self, C):
vstruct.VArray.__init__(self)
self._C = C
def vsParse(self, bytez, offset=0, fast=False):
soffset = offset
while offset < len(bytez):
c = self._C()
try:
offset = c.vsParse(bytez, offset=offset, fast=False)
except:
break
self.vsAddElement(c)
return offset
def vsParseFd(self, fd):
raise NotImplementedError() |
<|file_name|>sdb_dump_patch.py<|end_file_name|><|fim▁begin|>import sys
import logging
import hexdump
import vstruct
import vivisect
import envi
import envi.archs.i386 as x86
import envi.archs.amd64 as x64
import sdb
from sdb import SDB_TAGS
from sdb_dump_common import SdbIndex
from sdb_dump_common import item_get_child
from sdb_dump_common import item_get_children
logging.basicConfig(level=logging.DEBUG)
g_logger = logging.getLogger("sdb_dump_patch")
g_logger.setLevel(logging.DEBUG)
ARCH_32 = "32"
ARCH_64 = "64"
def disassemble(buf, base=0, arch=ARCH_32):
if arch == ARCH_32:
d = x86.i386Disasm()
elif arch == ARCH_64:
d = x64.Amd64Disasm()
else:
raise RuntimeError('unknown arch: ' + str(arch))
offset = 0
while True:
if offset >= len(buf):
break
o = d.disasm(buf, offset, base)
yield "0x%x: %s" % (base + offset, str(o))
offset += o.size
class GreedyVArray(vstruct.VArray):
def __init__(self, C):
<|fim_middle|>
def vsParse(self, bytez, offset=0, fast=False):
soffset = offset
while offset < len(bytez):
c = self._C()
try:
offset = c.vsParse(bytez, offset=offset, fast=False)
except:
break
self.vsAddElement(c)
return offset
def vsParseFd(self, fd):
raise NotImplementedError()
def dump_patch(bits, arch=ARCH_32):
ps = GreedyVArray(sdb.PATCHBITS)
ps.vsParse(bits.value.value)
for i, _ in ps:
p = ps[int(i)]
print(" opcode: %s" % str(p["opcode"]))
print(" module name: %s" % p.module_name)
print(" rva: 0x%08x" % p.rva)
print(" unk: 0x%08x" % p.unknown)
print(" payload:")
print(hexdump.hexdump(str(p.pattern), result="return"))
print(" disassembly:")
for l in disassemble(str(p.pattern), p.rva, arch=arch):
print(" " + l)
print("")
def _main(sdb_path, patch_name):
from sdb import SDB
with open(sdb_path, "rb") as f:
buf = f.read()
g_logger.debug("loading database")
s = SDB()
s.vsParse(bytearray(buf))
g_logger.debug("done loading database")
index = SdbIndex()
g_logger.debug("indexing strings")
index.index_sdb(s)
g_logger.debug("done indexing strings")
try:
library = item_get_child(s.database_root, SDB_TAGS.TAG_LIBRARY)
except KeyError:
pass
else:
for shim_ref in item_get_children(library, SDB_TAGS.TAG_SHIM_REF):
patch = item_get_child(shim_ref, SDB_TAGS.TAG_PATCH)
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name != patch_name:
continue
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
try:
patch = item_get_child(s.database_root, SDB_TAGS.TAG_PATCH)
except KeyError:
pass
else:
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name == patch_name:
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
def main():
import sys
return sys.exit(_main(*sys.argv[1:]))
if __name__ == "__main__":
main()
<|fim▁end|> | vstruct.VArray.__init__(self)
self._C = C |
<|file_name|>sdb_dump_patch.py<|end_file_name|><|fim▁begin|>import sys
import logging
import hexdump
import vstruct
import vivisect
import envi
import envi.archs.i386 as x86
import envi.archs.amd64 as x64
import sdb
from sdb import SDB_TAGS
from sdb_dump_common import SdbIndex
from sdb_dump_common import item_get_child
from sdb_dump_common import item_get_children
logging.basicConfig(level=logging.DEBUG)
g_logger = logging.getLogger("sdb_dump_patch")
g_logger.setLevel(logging.DEBUG)
ARCH_32 = "32"
ARCH_64 = "64"
def disassemble(buf, base=0, arch=ARCH_32):
if arch == ARCH_32:
d = x86.i386Disasm()
elif arch == ARCH_64:
d = x64.Amd64Disasm()
else:
raise RuntimeError('unknown arch: ' + str(arch))
offset = 0
while True:
if offset >= len(buf):
break
o = d.disasm(buf, offset, base)
yield "0x%x: %s" % (base + offset, str(o))
offset += o.size
class GreedyVArray(vstruct.VArray):
def __init__(self, C):
vstruct.VArray.__init__(self)
self._C = C
def vsParse(self, bytez, offset=0, fast=False):
<|fim_middle|>
def vsParseFd(self, fd):
raise NotImplementedError()
def dump_patch(bits, arch=ARCH_32):
ps = GreedyVArray(sdb.PATCHBITS)
ps.vsParse(bits.value.value)
for i, _ in ps:
p = ps[int(i)]
print(" opcode: %s" % str(p["opcode"]))
print(" module name: %s" % p.module_name)
print(" rva: 0x%08x" % p.rva)
print(" unk: 0x%08x" % p.unknown)
print(" payload:")
print(hexdump.hexdump(str(p.pattern), result="return"))
print(" disassembly:")
for l in disassemble(str(p.pattern), p.rva, arch=arch):
print(" " + l)
print("")
def _main(sdb_path, patch_name):
from sdb import SDB
with open(sdb_path, "rb") as f:
buf = f.read()
g_logger.debug("loading database")
s = SDB()
s.vsParse(bytearray(buf))
g_logger.debug("done loading database")
index = SdbIndex()
g_logger.debug("indexing strings")
index.index_sdb(s)
g_logger.debug("done indexing strings")
try:
library = item_get_child(s.database_root, SDB_TAGS.TAG_LIBRARY)
except KeyError:
pass
else:
for shim_ref in item_get_children(library, SDB_TAGS.TAG_SHIM_REF):
patch = item_get_child(shim_ref, SDB_TAGS.TAG_PATCH)
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name != patch_name:
continue
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
try:
patch = item_get_child(s.database_root, SDB_TAGS.TAG_PATCH)
except KeyError:
pass
else:
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name == patch_name:
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
def main():
import sys
return sys.exit(_main(*sys.argv[1:]))
if __name__ == "__main__":
main()
<|fim▁end|> | soffset = offset
while offset < len(bytez):
c = self._C()
try:
offset = c.vsParse(bytez, offset=offset, fast=False)
except:
break
self.vsAddElement(c)
return offset |
<|file_name|>sdb_dump_patch.py<|end_file_name|><|fim▁begin|>import sys
import logging
import hexdump
import vstruct
import vivisect
import envi
import envi.archs.i386 as x86
import envi.archs.amd64 as x64
import sdb
from sdb import SDB_TAGS
from sdb_dump_common import SdbIndex
from sdb_dump_common import item_get_child
from sdb_dump_common import item_get_children
logging.basicConfig(level=logging.DEBUG)
g_logger = logging.getLogger("sdb_dump_patch")
g_logger.setLevel(logging.DEBUG)
ARCH_32 = "32"
ARCH_64 = "64"
def disassemble(buf, base=0, arch=ARCH_32):
if arch == ARCH_32:
d = x86.i386Disasm()
elif arch == ARCH_64:
d = x64.Amd64Disasm()
else:
raise RuntimeError('unknown arch: ' + str(arch))
offset = 0
while True:
if offset >= len(buf):
break
o = d.disasm(buf, offset, base)
yield "0x%x: %s" % (base + offset, str(o))
offset += o.size
class GreedyVArray(vstruct.VArray):
def __init__(self, C):
vstruct.VArray.__init__(self)
self._C = C
def vsParse(self, bytez, offset=0, fast=False):
soffset = offset
while offset < len(bytez):
c = self._C()
try:
offset = c.vsParse(bytez, offset=offset, fast=False)
except:
break
self.vsAddElement(c)
return offset
def vsParseFd(self, fd):
<|fim_middle|>
def dump_patch(bits, arch=ARCH_32):
ps = GreedyVArray(sdb.PATCHBITS)
ps.vsParse(bits.value.value)
for i, _ in ps:
p = ps[int(i)]
print(" opcode: %s" % str(p["opcode"]))
print(" module name: %s" % p.module_name)
print(" rva: 0x%08x" % p.rva)
print(" unk: 0x%08x" % p.unknown)
print(" payload:")
print(hexdump.hexdump(str(p.pattern), result="return"))
print(" disassembly:")
for l in disassemble(str(p.pattern), p.rva, arch=arch):
print(" " + l)
print("")
def _main(sdb_path, patch_name):
from sdb import SDB
with open(sdb_path, "rb") as f:
buf = f.read()
g_logger.debug("loading database")
s = SDB()
s.vsParse(bytearray(buf))
g_logger.debug("done loading database")
index = SdbIndex()
g_logger.debug("indexing strings")
index.index_sdb(s)
g_logger.debug("done indexing strings")
try:
library = item_get_child(s.database_root, SDB_TAGS.TAG_LIBRARY)
except KeyError:
pass
else:
for shim_ref in item_get_children(library, SDB_TAGS.TAG_SHIM_REF):
patch = item_get_child(shim_ref, SDB_TAGS.TAG_PATCH)
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name != patch_name:
continue
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
try:
patch = item_get_child(s.database_root, SDB_TAGS.TAG_PATCH)
except KeyError:
pass
else:
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name == patch_name:
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
def main():
import sys
return sys.exit(_main(*sys.argv[1:]))
if __name__ == "__main__":
main()
<|fim▁end|> | raise NotImplementedError() |
<|file_name|>sdb_dump_patch.py<|end_file_name|><|fim▁begin|>import sys
import logging
import hexdump
import vstruct
import vivisect
import envi
import envi.archs.i386 as x86
import envi.archs.amd64 as x64
import sdb
from sdb import SDB_TAGS
from sdb_dump_common import SdbIndex
from sdb_dump_common import item_get_child
from sdb_dump_common import item_get_children
logging.basicConfig(level=logging.DEBUG)
g_logger = logging.getLogger("sdb_dump_patch")
g_logger.setLevel(logging.DEBUG)
ARCH_32 = "32"
ARCH_64 = "64"
def disassemble(buf, base=0, arch=ARCH_32):
if arch == ARCH_32:
d = x86.i386Disasm()
elif arch == ARCH_64:
d = x64.Amd64Disasm()
else:
raise RuntimeError('unknown arch: ' + str(arch))
offset = 0
while True:
if offset >= len(buf):
break
o = d.disasm(buf, offset, base)
yield "0x%x: %s" % (base + offset, str(o))
offset += o.size
class GreedyVArray(vstruct.VArray):
def __init__(self, C):
vstruct.VArray.__init__(self)
self._C = C
def vsParse(self, bytez, offset=0, fast=False):
soffset = offset
while offset < len(bytez):
c = self._C()
try:
offset = c.vsParse(bytez, offset=offset, fast=False)
except:
break
self.vsAddElement(c)
return offset
def vsParseFd(self, fd):
raise NotImplementedError()
def dump_patch(bits, arch=ARCH_32):
<|fim_middle|>
def _main(sdb_path, patch_name):
from sdb import SDB
with open(sdb_path, "rb") as f:
buf = f.read()
g_logger.debug("loading database")
s = SDB()
s.vsParse(bytearray(buf))
g_logger.debug("done loading database")
index = SdbIndex()
g_logger.debug("indexing strings")
index.index_sdb(s)
g_logger.debug("done indexing strings")
try:
library = item_get_child(s.database_root, SDB_TAGS.TAG_LIBRARY)
except KeyError:
pass
else:
for shim_ref in item_get_children(library, SDB_TAGS.TAG_SHIM_REF):
patch = item_get_child(shim_ref, SDB_TAGS.TAG_PATCH)
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name != patch_name:
continue
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
try:
patch = item_get_child(s.database_root, SDB_TAGS.TAG_PATCH)
except KeyError:
pass
else:
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name == patch_name:
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
def main():
import sys
return sys.exit(_main(*sys.argv[1:]))
if __name__ == "__main__":
main()
<|fim▁end|> | ps = GreedyVArray(sdb.PATCHBITS)
ps.vsParse(bits.value.value)
for i, _ in ps:
p = ps[int(i)]
print(" opcode: %s" % str(p["opcode"]))
print(" module name: %s" % p.module_name)
print(" rva: 0x%08x" % p.rva)
print(" unk: 0x%08x" % p.unknown)
print(" payload:")
print(hexdump.hexdump(str(p.pattern), result="return"))
print(" disassembly:")
for l in disassemble(str(p.pattern), p.rva, arch=arch):
print(" " + l)
print("") |
<|file_name|>sdb_dump_patch.py<|end_file_name|><|fim▁begin|>import sys
import logging
import hexdump
import vstruct
import vivisect
import envi
import envi.archs.i386 as x86
import envi.archs.amd64 as x64
import sdb
from sdb import SDB_TAGS
from sdb_dump_common import SdbIndex
from sdb_dump_common import item_get_child
from sdb_dump_common import item_get_children
logging.basicConfig(level=logging.DEBUG)
g_logger = logging.getLogger("sdb_dump_patch")
g_logger.setLevel(logging.DEBUG)
ARCH_32 = "32"
ARCH_64 = "64"
def disassemble(buf, base=0, arch=ARCH_32):
if arch == ARCH_32:
d = x86.i386Disasm()
elif arch == ARCH_64:
d = x64.Amd64Disasm()
else:
raise RuntimeError('unknown arch: ' + str(arch))
offset = 0
while True:
if offset >= len(buf):
break
o = d.disasm(buf, offset, base)
yield "0x%x: %s" % (base + offset, str(o))
offset += o.size
class GreedyVArray(vstruct.VArray):
def __init__(self, C):
vstruct.VArray.__init__(self)
self._C = C
def vsParse(self, bytez, offset=0, fast=False):
soffset = offset
while offset < len(bytez):
c = self._C()
try:
offset = c.vsParse(bytez, offset=offset, fast=False)
except:
break
self.vsAddElement(c)
return offset
def vsParseFd(self, fd):
raise NotImplementedError()
def dump_patch(bits, arch=ARCH_32):
ps = GreedyVArray(sdb.PATCHBITS)
ps.vsParse(bits.value.value)
for i, _ in ps:
p = ps[int(i)]
print(" opcode: %s" % str(p["opcode"]))
print(" module name: %s" % p.module_name)
print(" rva: 0x%08x" % p.rva)
print(" unk: 0x%08x" % p.unknown)
print(" payload:")
print(hexdump.hexdump(str(p.pattern), result="return"))
print(" disassembly:")
for l in disassemble(str(p.pattern), p.rva, arch=arch):
print(" " + l)
print("")
def _main(sdb_path, patch_name):
<|fim_middle|>
def main():
import sys
return sys.exit(_main(*sys.argv[1:]))
if __name__ == "__main__":
main()
<|fim▁end|> | from sdb import SDB
with open(sdb_path, "rb") as f:
buf = f.read()
g_logger.debug("loading database")
s = SDB()
s.vsParse(bytearray(buf))
g_logger.debug("done loading database")
index = SdbIndex()
g_logger.debug("indexing strings")
index.index_sdb(s)
g_logger.debug("done indexing strings")
try:
library = item_get_child(s.database_root, SDB_TAGS.TAG_LIBRARY)
except KeyError:
pass
else:
for shim_ref in item_get_children(library, SDB_TAGS.TAG_SHIM_REF):
patch = item_get_child(shim_ref, SDB_TAGS.TAG_PATCH)
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name != patch_name:
continue
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
try:
patch = item_get_child(s.database_root, SDB_TAGS.TAG_PATCH)
except KeyError:
pass
else:
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name == patch_name:
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32) |
<|file_name|>sdb_dump_patch.py<|end_file_name|><|fim▁begin|>import sys
import logging
import hexdump
import vstruct
import vivisect
import envi
import envi.archs.i386 as x86
import envi.archs.amd64 as x64
import sdb
from sdb import SDB_TAGS
from sdb_dump_common import SdbIndex
from sdb_dump_common import item_get_child
from sdb_dump_common import item_get_children
logging.basicConfig(level=logging.DEBUG)
g_logger = logging.getLogger("sdb_dump_patch")
g_logger.setLevel(logging.DEBUG)
ARCH_32 = "32"
ARCH_64 = "64"
def disassemble(buf, base=0, arch=ARCH_32):
if arch == ARCH_32:
d = x86.i386Disasm()
elif arch == ARCH_64:
d = x64.Amd64Disasm()
else:
raise RuntimeError('unknown arch: ' + str(arch))
offset = 0
while True:
if offset >= len(buf):
break
o = d.disasm(buf, offset, base)
yield "0x%x: %s" % (base + offset, str(o))
offset += o.size
class GreedyVArray(vstruct.VArray):
def __init__(self, C):
vstruct.VArray.__init__(self)
self._C = C
def vsParse(self, bytez, offset=0, fast=False):
soffset = offset
while offset < len(bytez):
c = self._C()
try:
offset = c.vsParse(bytez, offset=offset, fast=False)
except:
break
self.vsAddElement(c)
return offset
def vsParseFd(self, fd):
raise NotImplementedError()
def dump_patch(bits, arch=ARCH_32):
ps = GreedyVArray(sdb.PATCHBITS)
ps.vsParse(bits.value.value)
for i, _ in ps:
p = ps[int(i)]
print(" opcode: %s" % str(p["opcode"]))
print(" module name: %s" % p.module_name)
print(" rva: 0x%08x" % p.rva)
print(" unk: 0x%08x" % p.unknown)
print(" payload:")
print(hexdump.hexdump(str(p.pattern), result="return"))
print(" disassembly:")
for l in disassemble(str(p.pattern), p.rva, arch=arch):
print(" " + l)
print("")
def _main(sdb_path, patch_name):
from sdb import SDB
with open(sdb_path, "rb") as f:
buf = f.read()
g_logger.debug("loading database")
s = SDB()
s.vsParse(bytearray(buf))
g_logger.debug("done loading database")
index = SdbIndex()
g_logger.debug("indexing strings")
index.index_sdb(s)
g_logger.debug("done indexing strings")
try:
library = item_get_child(s.database_root, SDB_TAGS.TAG_LIBRARY)
except KeyError:
pass
else:
for shim_ref in item_get_children(library, SDB_TAGS.TAG_SHIM_REF):
patch = item_get_child(shim_ref, SDB_TAGS.TAG_PATCH)
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name != patch_name:
continue
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
try:
patch = item_get_child(s.database_root, SDB_TAGS.TAG_PATCH)
except KeyError:
pass
else:
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name == patch_name:
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
def main():
<|fim_middle|>
if __name__ == "__main__":
main()
<|fim▁end|> | import sys
return sys.exit(_main(*sys.argv[1:])) |
<|file_name|>sdb_dump_patch.py<|end_file_name|><|fim▁begin|>import sys
import logging
import hexdump
import vstruct
import vivisect
import envi
import envi.archs.i386 as x86
import envi.archs.amd64 as x64
import sdb
from sdb import SDB_TAGS
from sdb_dump_common import SdbIndex
from sdb_dump_common import item_get_child
from sdb_dump_common import item_get_children
logging.basicConfig(level=logging.DEBUG)
g_logger = logging.getLogger("sdb_dump_patch")
g_logger.setLevel(logging.DEBUG)
ARCH_32 = "32"
ARCH_64 = "64"
def disassemble(buf, base=0, arch=ARCH_32):
if arch == ARCH_32:
<|fim_middle|>
elif arch == ARCH_64:
d = x64.Amd64Disasm()
else:
raise RuntimeError('unknown arch: ' + str(arch))
offset = 0
while True:
if offset >= len(buf):
break
o = d.disasm(buf, offset, base)
yield "0x%x: %s" % (base + offset, str(o))
offset += o.size
class GreedyVArray(vstruct.VArray):
def __init__(self, C):
vstruct.VArray.__init__(self)
self._C = C
def vsParse(self, bytez, offset=0, fast=False):
soffset = offset
while offset < len(bytez):
c = self._C()
try:
offset = c.vsParse(bytez, offset=offset, fast=False)
except:
break
self.vsAddElement(c)
return offset
def vsParseFd(self, fd):
raise NotImplementedError()
def dump_patch(bits, arch=ARCH_32):
ps = GreedyVArray(sdb.PATCHBITS)
ps.vsParse(bits.value.value)
for i, _ in ps:
p = ps[int(i)]
print(" opcode: %s" % str(p["opcode"]))
print(" module name: %s" % p.module_name)
print(" rva: 0x%08x" % p.rva)
print(" unk: 0x%08x" % p.unknown)
print(" payload:")
print(hexdump.hexdump(str(p.pattern), result="return"))
print(" disassembly:")
for l in disassemble(str(p.pattern), p.rva, arch=arch):
print(" " + l)
print("")
def _main(sdb_path, patch_name):
from sdb import SDB
with open(sdb_path, "rb") as f:
buf = f.read()
g_logger.debug("loading database")
s = SDB()
s.vsParse(bytearray(buf))
g_logger.debug("done loading database")
index = SdbIndex()
g_logger.debug("indexing strings")
index.index_sdb(s)
g_logger.debug("done indexing strings")
try:
library = item_get_child(s.database_root, SDB_TAGS.TAG_LIBRARY)
except KeyError:
pass
else:
for shim_ref in item_get_children(library, SDB_TAGS.TAG_SHIM_REF):
patch = item_get_child(shim_ref, SDB_TAGS.TAG_PATCH)
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name != patch_name:
continue
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
try:
patch = item_get_child(s.database_root, SDB_TAGS.TAG_PATCH)
except KeyError:
pass
else:
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name == patch_name:
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
def main():
import sys
return sys.exit(_main(*sys.argv[1:]))
if __name__ == "__main__":
main()
<|fim▁end|> | d = x86.i386Disasm() |
<|file_name|>sdb_dump_patch.py<|end_file_name|><|fim▁begin|>import sys
import logging
import hexdump
import vstruct
import vivisect
import envi
import envi.archs.i386 as x86
import envi.archs.amd64 as x64
import sdb
from sdb import SDB_TAGS
from sdb_dump_common import SdbIndex
from sdb_dump_common import item_get_child
from sdb_dump_common import item_get_children
logging.basicConfig(level=logging.DEBUG)
g_logger = logging.getLogger("sdb_dump_patch")
g_logger.setLevel(logging.DEBUG)
ARCH_32 = "32"
ARCH_64 = "64"
def disassemble(buf, base=0, arch=ARCH_32):
if arch == ARCH_32:
d = x86.i386Disasm()
elif arch == ARCH_64:
<|fim_middle|>
else:
raise RuntimeError('unknown arch: ' + str(arch))
offset = 0
while True:
if offset >= len(buf):
break
o = d.disasm(buf, offset, base)
yield "0x%x: %s" % (base + offset, str(o))
offset += o.size
class GreedyVArray(vstruct.VArray):
def __init__(self, C):
vstruct.VArray.__init__(self)
self._C = C
def vsParse(self, bytez, offset=0, fast=False):
soffset = offset
while offset < len(bytez):
c = self._C()
try:
offset = c.vsParse(bytez, offset=offset, fast=False)
except:
break
self.vsAddElement(c)
return offset
def vsParseFd(self, fd):
raise NotImplementedError()
def dump_patch(bits, arch=ARCH_32):
ps = GreedyVArray(sdb.PATCHBITS)
ps.vsParse(bits.value.value)
for i, _ in ps:
p = ps[int(i)]
print(" opcode: %s" % str(p["opcode"]))
print(" module name: %s" % p.module_name)
print(" rva: 0x%08x" % p.rva)
print(" unk: 0x%08x" % p.unknown)
print(" payload:")
print(hexdump.hexdump(str(p.pattern), result="return"))
print(" disassembly:")
for l in disassemble(str(p.pattern), p.rva, arch=arch):
print(" " + l)
print("")
def _main(sdb_path, patch_name):
from sdb import SDB
with open(sdb_path, "rb") as f:
buf = f.read()
g_logger.debug("loading database")
s = SDB()
s.vsParse(bytearray(buf))
g_logger.debug("done loading database")
index = SdbIndex()
g_logger.debug("indexing strings")
index.index_sdb(s)
g_logger.debug("done indexing strings")
try:
library = item_get_child(s.database_root, SDB_TAGS.TAG_LIBRARY)
except KeyError:
pass
else:
for shim_ref in item_get_children(library, SDB_TAGS.TAG_SHIM_REF):
patch = item_get_child(shim_ref, SDB_TAGS.TAG_PATCH)
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name != patch_name:
continue
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
try:
patch = item_get_child(s.database_root, SDB_TAGS.TAG_PATCH)
except KeyError:
pass
else:
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name == patch_name:
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
def main():
import sys
return sys.exit(_main(*sys.argv[1:]))
if __name__ == "__main__":
main()
<|fim▁end|> | d = x64.Amd64Disasm() |
<|file_name|>sdb_dump_patch.py<|end_file_name|><|fim▁begin|>import sys
import logging
import hexdump
import vstruct
import vivisect
import envi
import envi.archs.i386 as x86
import envi.archs.amd64 as x64
import sdb
from sdb import SDB_TAGS
from sdb_dump_common import SdbIndex
from sdb_dump_common import item_get_child
from sdb_dump_common import item_get_children
logging.basicConfig(level=logging.DEBUG)
g_logger = logging.getLogger("sdb_dump_patch")
g_logger.setLevel(logging.DEBUG)
ARCH_32 = "32"
ARCH_64 = "64"
def disassemble(buf, base=0, arch=ARCH_32):
if arch == ARCH_32:
d = x86.i386Disasm()
elif arch == ARCH_64:
d = x64.Amd64Disasm()
else:
<|fim_middle|>
offset = 0
while True:
if offset >= len(buf):
break
o = d.disasm(buf, offset, base)
yield "0x%x: %s" % (base + offset, str(o))
offset += o.size
class GreedyVArray(vstruct.VArray):
def __init__(self, C):
vstruct.VArray.__init__(self)
self._C = C
def vsParse(self, bytez, offset=0, fast=False):
soffset = offset
while offset < len(bytez):
c = self._C()
try:
offset = c.vsParse(bytez, offset=offset, fast=False)
except:
break
self.vsAddElement(c)
return offset
def vsParseFd(self, fd):
raise NotImplementedError()
def dump_patch(bits, arch=ARCH_32):
ps = GreedyVArray(sdb.PATCHBITS)
ps.vsParse(bits.value.value)
for i, _ in ps:
p = ps[int(i)]
print(" opcode: %s" % str(p["opcode"]))
print(" module name: %s" % p.module_name)
print(" rva: 0x%08x" % p.rva)
print(" unk: 0x%08x" % p.unknown)
print(" payload:")
print(hexdump.hexdump(str(p.pattern), result="return"))
print(" disassembly:")
for l in disassemble(str(p.pattern), p.rva, arch=arch):
print(" " + l)
print("")
def _main(sdb_path, patch_name):
from sdb import SDB
with open(sdb_path, "rb") as f:
buf = f.read()
g_logger.debug("loading database")
s = SDB()
s.vsParse(bytearray(buf))
g_logger.debug("done loading database")
index = SdbIndex()
g_logger.debug("indexing strings")
index.index_sdb(s)
g_logger.debug("done indexing strings")
try:
library = item_get_child(s.database_root, SDB_TAGS.TAG_LIBRARY)
except KeyError:
pass
else:
for shim_ref in item_get_children(library, SDB_TAGS.TAG_SHIM_REF):
patch = item_get_child(shim_ref, SDB_TAGS.TAG_PATCH)
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name != patch_name:
continue
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
try:
patch = item_get_child(s.database_root, SDB_TAGS.TAG_PATCH)
except KeyError:
pass
else:
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name == patch_name:
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
def main():
import sys
return sys.exit(_main(*sys.argv[1:]))
if __name__ == "__main__":
main()
<|fim▁end|> | raise RuntimeError('unknown arch: ' + str(arch)) |
<|file_name|>sdb_dump_patch.py<|end_file_name|><|fim▁begin|>import sys
import logging
import hexdump
import vstruct
import vivisect
import envi
import envi.archs.i386 as x86
import envi.archs.amd64 as x64
import sdb
from sdb import SDB_TAGS
from sdb_dump_common import SdbIndex
from sdb_dump_common import item_get_child
from sdb_dump_common import item_get_children
logging.basicConfig(level=logging.DEBUG)
g_logger = logging.getLogger("sdb_dump_patch")
g_logger.setLevel(logging.DEBUG)
ARCH_32 = "32"
ARCH_64 = "64"
def disassemble(buf, base=0, arch=ARCH_32):
if arch == ARCH_32:
d = x86.i386Disasm()
elif arch == ARCH_64:
d = x64.Amd64Disasm()
else:
raise RuntimeError('unknown arch: ' + str(arch))
offset = 0
while True:
if offset >= len(buf):
<|fim_middle|>
o = d.disasm(buf, offset, base)
yield "0x%x: %s" % (base + offset, str(o))
offset += o.size
class GreedyVArray(vstruct.VArray):
def __init__(self, C):
vstruct.VArray.__init__(self)
self._C = C
def vsParse(self, bytez, offset=0, fast=False):
soffset = offset
while offset < len(bytez):
c = self._C()
try:
offset = c.vsParse(bytez, offset=offset, fast=False)
except:
break
self.vsAddElement(c)
return offset
def vsParseFd(self, fd):
raise NotImplementedError()
def dump_patch(bits, arch=ARCH_32):
ps = GreedyVArray(sdb.PATCHBITS)
ps.vsParse(bits.value.value)
for i, _ in ps:
p = ps[int(i)]
print(" opcode: %s" % str(p["opcode"]))
print(" module name: %s" % p.module_name)
print(" rva: 0x%08x" % p.rva)
print(" unk: 0x%08x" % p.unknown)
print(" payload:")
print(hexdump.hexdump(str(p.pattern), result="return"))
print(" disassembly:")
for l in disassemble(str(p.pattern), p.rva, arch=arch):
print(" " + l)
print("")
def _main(sdb_path, patch_name):
from sdb import SDB
with open(sdb_path, "rb") as f:
buf = f.read()
g_logger.debug("loading database")
s = SDB()
s.vsParse(bytearray(buf))
g_logger.debug("done loading database")
index = SdbIndex()
g_logger.debug("indexing strings")
index.index_sdb(s)
g_logger.debug("done indexing strings")
try:
library = item_get_child(s.database_root, SDB_TAGS.TAG_LIBRARY)
except KeyError:
pass
else:
for shim_ref in item_get_children(library, SDB_TAGS.TAG_SHIM_REF):
patch = item_get_child(shim_ref, SDB_TAGS.TAG_PATCH)
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name != patch_name:
continue
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
try:
patch = item_get_child(s.database_root, SDB_TAGS.TAG_PATCH)
except KeyError:
pass
else:
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name == patch_name:
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
def main():
import sys
return sys.exit(_main(*sys.argv[1:]))
if __name__ == "__main__":
main()
<|fim▁end|> | break |
<|file_name|>sdb_dump_patch.py<|end_file_name|><|fim▁begin|>import sys
import logging
import hexdump
import vstruct
import vivisect
import envi
import envi.archs.i386 as x86
import envi.archs.amd64 as x64
import sdb
from sdb import SDB_TAGS
from sdb_dump_common import SdbIndex
from sdb_dump_common import item_get_child
from sdb_dump_common import item_get_children
logging.basicConfig(level=logging.DEBUG)
g_logger = logging.getLogger("sdb_dump_patch")
g_logger.setLevel(logging.DEBUG)
ARCH_32 = "32"
ARCH_64 = "64"
def disassemble(buf, base=0, arch=ARCH_32):
if arch == ARCH_32:
d = x86.i386Disasm()
elif arch == ARCH_64:
d = x64.Amd64Disasm()
else:
raise RuntimeError('unknown arch: ' + str(arch))
offset = 0
while True:
if offset >= len(buf):
break
o = d.disasm(buf, offset, base)
yield "0x%x: %s" % (base + offset, str(o))
offset += o.size
class GreedyVArray(vstruct.VArray):
def __init__(self, C):
vstruct.VArray.__init__(self)
self._C = C
def vsParse(self, bytez, offset=0, fast=False):
soffset = offset
while offset < len(bytez):
c = self._C()
try:
offset = c.vsParse(bytez, offset=offset, fast=False)
except:
break
self.vsAddElement(c)
return offset
def vsParseFd(self, fd):
raise NotImplementedError()
def dump_patch(bits, arch=ARCH_32):
ps = GreedyVArray(sdb.PATCHBITS)
ps.vsParse(bits.value.value)
for i, _ in ps:
p = ps[int(i)]
print(" opcode: %s" % str(p["opcode"]))
print(" module name: %s" % p.module_name)
print(" rva: 0x%08x" % p.rva)
print(" unk: 0x%08x" % p.unknown)
print(" payload:")
print(hexdump.hexdump(str(p.pattern), result="return"))
print(" disassembly:")
for l in disassemble(str(p.pattern), p.rva, arch=arch):
print(" " + l)
print("")
def _main(sdb_path, patch_name):
from sdb import SDB
with open(sdb_path, "rb") as f:
buf = f.read()
g_logger.debug("loading database")
s = SDB()
s.vsParse(bytearray(buf))
g_logger.debug("done loading database")
index = SdbIndex()
g_logger.debug("indexing strings")
index.index_sdb(s)
g_logger.debug("done indexing strings")
try:
library = item_get_child(s.database_root, SDB_TAGS.TAG_LIBRARY)
except KeyError:
pass
else:
<|fim_middle|>
try:
patch = item_get_child(s.database_root, SDB_TAGS.TAG_PATCH)
except KeyError:
pass
else:
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name == patch_name:
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
def main():
import sys
return sys.exit(_main(*sys.argv[1:]))
if __name__ == "__main__":
main()
<|fim▁end|> | for shim_ref in item_get_children(library, SDB_TAGS.TAG_SHIM_REF):
patch = item_get_child(shim_ref, SDB_TAGS.TAG_PATCH)
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name != patch_name:
continue
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32) |
<|file_name|>sdb_dump_patch.py<|end_file_name|><|fim▁begin|>import sys
import logging
import hexdump
import vstruct
import vivisect
import envi
import envi.archs.i386 as x86
import envi.archs.amd64 as x64
import sdb
from sdb import SDB_TAGS
from sdb_dump_common import SdbIndex
from sdb_dump_common import item_get_child
from sdb_dump_common import item_get_children
logging.basicConfig(level=logging.DEBUG)
g_logger = logging.getLogger("sdb_dump_patch")
g_logger.setLevel(logging.DEBUG)
ARCH_32 = "32"
ARCH_64 = "64"
def disassemble(buf, base=0, arch=ARCH_32):
if arch == ARCH_32:
d = x86.i386Disasm()
elif arch == ARCH_64:
d = x64.Amd64Disasm()
else:
raise RuntimeError('unknown arch: ' + str(arch))
offset = 0
while True:
if offset >= len(buf):
break
o = d.disasm(buf, offset, base)
yield "0x%x: %s" % (base + offset, str(o))
offset += o.size
class GreedyVArray(vstruct.VArray):
def __init__(self, C):
vstruct.VArray.__init__(self)
self._C = C
def vsParse(self, bytez, offset=0, fast=False):
soffset = offset
while offset < len(bytez):
c = self._C()
try:
offset = c.vsParse(bytez, offset=offset, fast=False)
except:
break
self.vsAddElement(c)
return offset
def vsParseFd(self, fd):
raise NotImplementedError()
def dump_patch(bits, arch=ARCH_32):
ps = GreedyVArray(sdb.PATCHBITS)
ps.vsParse(bits.value.value)
for i, _ in ps:
p = ps[int(i)]
print(" opcode: %s" % str(p["opcode"]))
print(" module name: %s" % p.module_name)
print(" rva: 0x%08x" % p.rva)
print(" unk: 0x%08x" % p.unknown)
print(" payload:")
print(hexdump.hexdump(str(p.pattern), result="return"))
print(" disassembly:")
for l in disassemble(str(p.pattern), p.rva, arch=arch):
print(" " + l)
print("")
def _main(sdb_path, patch_name):
from sdb import SDB
with open(sdb_path, "rb") as f:
buf = f.read()
g_logger.debug("loading database")
s = SDB()
s.vsParse(bytearray(buf))
g_logger.debug("done loading database")
index = SdbIndex()
g_logger.debug("indexing strings")
index.index_sdb(s)
g_logger.debug("done indexing strings")
try:
library = item_get_child(s.database_root, SDB_TAGS.TAG_LIBRARY)
except KeyError:
pass
else:
for shim_ref in item_get_children(library, SDB_TAGS.TAG_SHIM_REF):
patch = item_get_child(shim_ref, SDB_TAGS.TAG_PATCH)
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name != patch_name:
<|fim_middle|>
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
try:
patch = item_get_child(s.database_root, SDB_TAGS.TAG_PATCH)
except KeyError:
pass
else:
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name == patch_name:
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
def main():
import sys
return sys.exit(_main(*sys.argv[1:]))
if __name__ == "__main__":
main()
<|fim▁end|> | continue |
<|file_name|>sdb_dump_patch.py<|end_file_name|><|fim▁begin|>import sys
import logging
import hexdump
import vstruct
import vivisect
import envi
import envi.archs.i386 as x86
import envi.archs.amd64 as x64
import sdb
from sdb import SDB_TAGS
from sdb_dump_common import SdbIndex
from sdb_dump_common import item_get_child
from sdb_dump_common import item_get_children
logging.basicConfig(level=logging.DEBUG)
g_logger = logging.getLogger("sdb_dump_patch")
g_logger.setLevel(logging.DEBUG)
ARCH_32 = "32"
ARCH_64 = "64"
def disassemble(buf, base=0, arch=ARCH_32):
if arch == ARCH_32:
d = x86.i386Disasm()
elif arch == ARCH_64:
d = x64.Amd64Disasm()
else:
raise RuntimeError('unknown arch: ' + str(arch))
offset = 0
while True:
if offset >= len(buf):
break
o = d.disasm(buf, offset, base)
yield "0x%x: %s" % (base + offset, str(o))
offset += o.size
class GreedyVArray(vstruct.VArray):
def __init__(self, C):
vstruct.VArray.__init__(self)
self._C = C
def vsParse(self, bytez, offset=0, fast=False):
soffset = offset
while offset < len(bytez):
c = self._C()
try:
offset = c.vsParse(bytez, offset=offset, fast=False)
except:
break
self.vsAddElement(c)
return offset
def vsParseFd(self, fd):
raise NotImplementedError()
def dump_patch(bits, arch=ARCH_32):
ps = GreedyVArray(sdb.PATCHBITS)
ps.vsParse(bits.value.value)
for i, _ in ps:
p = ps[int(i)]
print(" opcode: %s" % str(p["opcode"]))
print(" module name: %s" % p.module_name)
print(" rva: 0x%08x" % p.rva)
print(" unk: 0x%08x" % p.unknown)
print(" payload:")
print(hexdump.hexdump(str(p.pattern), result="return"))
print(" disassembly:")
for l in disassemble(str(p.pattern), p.rva, arch=arch):
print(" " + l)
print("")
def _main(sdb_path, patch_name):
from sdb import SDB
with open(sdb_path, "rb") as f:
buf = f.read()
g_logger.debug("loading database")
s = SDB()
s.vsParse(bytearray(buf))
g_logger.debug("done loading database")
index = SdbIndex()
g_logger.debug("indexing strings")
index.index_sdb(s)
g_logger.debug("done indexing strings")
try:
library = item_get_child(s.database_root, SDB_TAGS.TAG_LIBRARY)
except KeyError:
pass
else:
for shim_ref in item_get_children(library, SDB_TAGS.TAG_SHIM_REF):
patch = item_get_child(shim_ref, SDB_TAGS.TAG_PATCH)
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name != patch_name:
continue
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
try:
patch = item_get_child(s.database_root, SDB_TAGS.TAG_PATCH)
except KeyError:
pass
else:
<|fim_middle|>
def main():
import sys
return sys.exit(_main(*sys.argv[1:]))
if __name__ == "__main__":
main()
<|fim▁end|> | name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name == patch_name:
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32) |
<|file_name|>sdb_dump_patch.py<|end_file_name|><|fim▁begin|>import sys
import logging
import hexdump
import vstruct
import vivisect
import envi
import envi.archs.i386 as x86
import envi.archs.amd64 as x64
import sdb
from sdb import SDB_TAGS
from sdb_dump_common import SdbIndex
from sdb_dump_common import item_get_child
from sdb_dump_common import item_get_children
logging.basicConfig(level=logging.DEBUG)
g_logger = logging.getLogger("sdb_dump_patch")
g_logger.setLevel(logging.DEBUG)
ARCH_32 = "32"
ARCH_64 = "64"
def disassemble(buf, base=0, arch=ARCH_32):
if arch == ARCH_32:
d = x86.i386Disasm()
elif arch == ARCH_64:
d = x64.Amd64Disasm()
else:
raise RuntimeError('unknown arch: ' + str(arch))
offset = 0
while True:
if offset >= len(buf):
break
o = d.disasm(buf, offset, base)
yield "0x%x: %s" % (base + offset, str(o))
offset += o.size
class GreedyVArray(vstruct.VArray):
def __init__(self, C):
vstruct.VArray.__init__(self)
self._C = C
def vsParse(self, bytez, offset=0, fast=False):
soffset = offset
while offset < len(bytez):
c = self._C()
try:
offset = c.vsParse(bytez, offset=offset, fast=False)
except:
break
self.vsAddElement(c)
return offset
def vsParseFd(self, fd):
raise NotImplementedError()
def dump_patch(bits, arch=ARCH_32):
ps = GreedyVArray(sdb.PATCHBITS)
ps.vsParse(bits.value.value)
for i, _ in ps:
p = ps[int(i)]
print(" opcode: %s" % str(p["opcode"]))
print(" module name: %s" % p.module_name)
print(" rva: 0x%08x" % p.rva)
print(" unk: 0x%08x" % p.unknown)
print(" payload:")
print(hexdump.hexdump(str(p.pattern), result="return"))
print(" disassembly:")
for l in disassemble(str(p.pattern), p.rva, arch=arch):
print(" " + l)
print("")
def _main(sdb_path, patch_name):
from sdb import SDB
with open(sdb_path, "rb") as f:
buf = f.read()
g_logger.debug("loading database")
s = SDB()
s.vsParse(bytearray(buf))
g_logger.debug("done loading database")
index = SdbIndex()
g_logger.debug("indexing strings")
index.index_sdb(s)
g_logger.debug("done indexing strings")
try:
library = item_get_child(s.database_root, SDB_TAGS.TAG_LIBRARY)
except KeyError:
pass
else:
for shim_ref in item_get_children(library, SDB_TAGS.TAG_SHIM_REF):
patch = item_get_child(shim_ref, SDB_TAGS.TAG_PATCH)
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name != patch_name:
continue
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
try:
patch = item_get_child(s.database_root, SDB_TAGS.TAG_PATCH)
except KeyError:
pass
else:
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name == patch_name:
<|fim_middle|>
def main():
import sys
return sys.exit(_main(*sys.argv[1:]))
if __name__ == "__main__":
main()
<|fim▁end|> | bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32) |
<|file_name|>sdb_dump_patch.py<|end_file_name|><|fim▁begin|>import sys
import logging
import hexdump
import vstruct
import vivisect
import envi
import envi.archs.i386 as x86
import envi.archs.amd64 as x64
import sdb
from sdb import SDB_TAGS
from sdb_dump_common import SdbIndex
from sdb_dump_common import item_get_child
from sdb_dump_common import item_get_children
logging.basicConfig(level=logging.DEBUG)
g_logger = logging.getLogger("sdb_dump_patch")
g_logger.setLevel(logging.DEBUG)
ARCH_32 = "32"
ARCH_64 = "64"
def disassemble(buf, base=0, arch=ARCH_32):
if arch == ARCH_32:
d = x86.i386Disasm()
elif arch == ARCH_64:
d = x64.Amd64Disasm()
else:
raise RuntimeError('unknown arch: ' + str(arch))
offset = 0
while True:
if offset >= len(buf):
break
o = d.disasm(buf, offset, base)
yield "0x%x: %s" % (base + offset, str(o))
offset += o.size
class GreedyVArray(vstruct.VArray):
def __init__(self, C):
vstruct.VArray.__init__(self)
self._C = C
def vsParse(self, bytez, offset=0, fast=False):
soffset = offset
while offset < len(bytez):
c = self._C()
try:
offset = c.vsParse(bytez, offset=offset, fast=False)
except:
break
self.vsAddElement(c)
return offset
def vsParseFd(self, fd):
raise NotImplementedError()
def dump_patch(bits, arch=ARCH_32):
ps = GreedyVArray(sdb.PATCHBITS)
ps.vsParse(bits.value.value)
for i, _ in ps:
p = ps[int(i)]
print(" opcode: %s" % str(p["opcode"]))
print(" module name: %s" % p.module_name)
print(" rva: 0x%08x" % p.rva)
print(" unk: 0x%08x" % p.unknown)
print(" payload:")
print(hexdump.hexdump(str(p.pattern), result="return"))
print(" disassembly:")
for l in disassemble(str(p.pattern), p.rva, arch=arch):
print(" " + l)
print("")
def _main(sdb_path, patch_name):
from sdb import SDB
with open(sdb_path, "rb") as f:
buf = f.read()
g_logger.debug("loading database")
s = SDB()
s.vsParse(bytearray(buf))
g_logger.debug("done loading database")
index = SdbIndex()
g_logger.debug("indexing strings")
index.index_sdb(s)
g_logger.debug("done indexing strings")
try:
library = item_get_child(s.database_root, SDB_TAGS.TAG_LIBRARY)
except KeyError:
pass
else:
for shim_ref in item_get_children(library, SDB_TAGS.TAG_SHIM_REF):
patch = item_get_child(shim_ref, SDB_TAGS.TAG_PATCH)
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name != patch_name:
continue
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
try:
patch = item_get_child(s.database_root, SDB_TAGS.TAG_PATCH)
except KeyError:
pass
else:
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name == patch_name:
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
def main():
import sys
return sys.exit(_main(*sys.argv[1:]))
if __name__ == "__main__":
<|fim_middle|>
<|fim▁end|> | main() |
<|file_name|>sdb_dump_patch.py<|end_file_name|><|fim▁begin|>import sys
import logging
import hexdump
import vstruct
import vivisect
import envi
import envi.archs.i386 as x86
import envi.archs.amd64 as x64
import sdb
from sdb import SDB_TAGS
from sdb_dump_common import SdbIndex
from sdb_dump_common import item_get_child
from sdb_dump_common import item_get_children
logging.basicConfig(level=logging.DEBUG)
g_logger = logging.getLogger("sdb_dump_patch")
g_logger.setLevel(logging.DEBUG)
ARCH_32 = "32"
ARCH_64 = "64"
def <|fim_middle|>(buf, base=0, arch=ARCH_32):
if arch == ARCH_32:
d = x86.i386Disasm()
elif arch == ARCH_64:
d = x64.Amd64Disasm()
else:
raise RuntimeError('unknown arch: ' + str(arch))
offset = 0
while True:
if offset >= len(buf):
break
o = d.disasm(buf, offset, base)
yield "0x%x: %s" % (base + offset, str(o))
offset += o.size
class GreedyVArray(vstruct.VArray):
def __init__(self, C):
vstruct.VArray.__init__(self)
self._C = C
def vsParse(self, bytez, offset=0, fast=False):
soffset = offset
while offset < len(bytez):
c = self._C()
try:
offset = c.vsParse(bytez, offset=offset, fast=False)
except:
break
self.vsAddElement(c)
return offset
def vsParseFd(self, fd):
raise NotImplementedError()
def dump_patch(bits, arch=ARCH_32):
ps = GreedyVArray(sdb.PATCHBITS)
ps.vsParse(bits.value.value)
for i, _ in ps:
p = ps[int(i)]
print(" opcode: %s" % str(p["opcode"]))
print(" module name: %s" % p.module_name)
print(" rva: 0x%08x" % p.rva)
print(" unk: 0x%08x" % p.unknown)
print(" payload:")
print(hexdump.hexdump(str(p.pattern), result="return"))
print(" disassembly:")
for l in disassemble(str(p.pattern), p.rva, arch=arch):
print(" " + l)
print("")
def _main(sdb_path, patch_name):
from sdb import SDB
with open(sdb_path, "rb") as f:
buf = f.read()
g_logger.debug("loading database")
s = SDB()
s.vsParse(bytearray(buf))
g_logger.debug("done loading database")
index = SdbIndex()
g_logger.debug("indexing strings")
index.index_sdb(s)
g_logger.debug("done indexing strings")
try:
library = item_get_child(s.database_root, SDB_TAGS.TAG_LIBRARY)
except KeyError:
pass
else:
for shim_ref in item_get_children(library, SDB_TAGS.TAG_SHIM_REF):
patch = item_get_child(shim_ref, SDB_TAGS.TAG_PATCH)
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name != patch_name:
continue
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
try:
patch = item_get_child(s.database_root, SDB_TAGS.TAG_PATCH)
except KeyError:
pass
else:
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name == patch_name:
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
def main():
import sys
return sys.exit(_main(*sys.argv[1:]))
if __name__ == "__main__":
main()
<|fim▁end|> | disassemble |
<|file_name|>sdb_dump_patch.py<|end_file_name|><|fim▁begin|>import sys
import logging
import hexdump
import vstruct
import vivisect
import envi
import envi.archs.i386 as x86
import envi.archs.amd64 as x64
import sdb
from sdb import SDB_TAGS
from sdb_dump_common import SdbIndex
from sdb_dump_common import item_get_child
from sdb_dump_common import item_get_children
logging.basicConfig(level=logging.DEBUG)
g_logger = logging.getLogger("sdb_dump_patch")
g_logger.setLevel(logging.DEBUG)
ARCH_32 = "32"
ARCH_64 = "64"
def disassemble(buf, base=0, arch=ARCH_32):
if arch == ARCH_32:
d = x86.i386Disasm()
elif arch == ARCH_64:
d = x64.Amd64Disasm()
else:
raise RuntimeError('unknown arch: ' + str(arch))
offset = 0
while True:
if offset >= len(buf):
break
o = d.disasm(buf, offset, base)
yield "0x%x: %s" % (base + offset, str(o))
offset += o.size
class GreedyVArray(vstruct.VArray):
def <|fim_middle|>(self, C):
vstruct.VArray.__init__(self)
self._C = C
def vsParse(self, bytez, offset=0, fast=False):
soffset = offset
while offset < len(bytez):
c = self._C()
try:
offset = c.vsParse(bytez, offset=offset, fast=False)
except:
break
self.vsAddElement(c)
return offset
def vsParseFd(self, fd):
raise NotImplementedError()
def dump_patch(bits, arch=ARCH_32):
ps = GreedyVArray(sdb.PATCHBITS)
ps.vsParse(bits.value.value)
for i, _ in ps:
p = ps[int(i)]
print(" opcode: %s" % str(p["opcode"]))
print(" module name: %s" % p.module_name)
print(" rva: 0x%08x" % p.rva)
print(" unk: 0x%08x" % p.unknown)
print(" payload:")
print(hexdump.hexdump(str(p.pattern), result="return"))
print(" disassembly:")
for l in disassemble(str(p.pattern), p.rva, arch=arch):
print(" " + l)
print("")
def _main(sdb_path, patch_name):
from sdb import SDB
with open(sdb_path, "rb") as f:
buf = f.read()
g_logger.debug("loading database")
s = SDB()
s.vsParse(bytearray(buf))
g_logger.debug("done loading database")
index = SdbIndex()
g_logger.debug("indexing strings")
index.index_sdb(s)
g_logger.debug("done indexing strings")
try:
library = item_get_child(s.database_root, SDB_TAGS.TAG_LIBRARY)
except KeyError:
pass
else:
for shim_ref in item_get_children(library, SDB_TAGS.TAG_SHIM_REF):
patch = item_get_child(shim_ref, SDB_TAGS.TAG_PATCH)
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name != patch_name:
continue
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
try:
patch = item_get_child(s.database_root, SDB_TAGS.TAG_PATCH)
except KeyError:
pass
else:
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name == patch_name:
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
def main():
import sys
return sys.exit(_main(*sys.argv[1:]))
if __name__ == "__main__":
main()
# ===== test_mongodb.py =====
# lint-amnesty, pylint: disable=missing-module-docstring

from unittest.mock import patch

from django.test import TestCase

from common.djangoapps.track.backends.mongodb import MongoBackend


class TestMongoBackend(TestCase):  # lint-amnesty, pylint: disable=missing-class-docstring
    def setUp(self):
        super().setUp()
        self.mongo_patcher = patch('common.djangoapps.track.backends.mongodb.MongoClient')
        self.mongo_patcher.start()
        self.addCleanup(self.mongo_patcher.stop)

        self.backend = MongoBackend()

    def test_mongo_backend(self):
        events = [{'test': 1}, {'test': 2}]

        self.backend.send(events[0])
        self.backend.send(events[1])

        # Check if we inserted events into the database
        calls = self.backend.collection.insert.mock_calls
        assert len(calls) == 2

        # Unpack the arguments and check if the events were used
        # as the first argument to collection.insert
        def first_argument(call):
            _, args, _ = call
            return args[0]

        assert events[0] == first_argument(calls[0])
        assert events[1] == first_argument(calls[1])
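The first_argument helper above works because every entry in mock_calls is a (name, args, kwargs) triple that supports tuple unpacking. A quick self-contained check of that unittest.mock behavior:

from unittest.mock import MagicMock

mock = MagicMock()
mock.collection.insert({'test': 1})

# Each recorded call unpacks into its name, positional args, and kwargs.
name, args, kwargs = mock.collection.insert.mock_calls[0]
assert args == ({'test': 1},) and kwargs == {}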
# ===== test_variable.py =====
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import unittest

from cryptography.fernet import Fernet

from airflow import settings
from airflow.models import Variable, crypto
from tests.test_utils.config import conf_vars


class TestVariable(unittest.TestCase):
    def setUp(self):
        crypto._fernet = None

    def tearDown(self):
        crypto._fernet = None

    @conf_vars({('core', 'fernet_key'): ''})
    def test_variable_no_encryption(self):
        """
        Test variables without encryption
        """
        Variable.set('key', 'value')
        session = settings.Session()
        test_var = session.query(Variable).filter(Variable.key == 'key').one()
        self.assertFalse(test_var.is_encrypted)
        self.assertEqual(test_var.val, 'value')

    @conf_vars({('core', 'fernet_key'): Fernet.generate_key().decode()})
    def test_variable_with_encryption(self):
        """
        Test variables with encryption
        """
        Variable.set('key', 'value')
        session = settings.Session()
        test_var = session.query(Variable).filter(Variable.key == 'key').one()
        self.assertTrue(test_var.is_encrypted)
        self.assertEqual(test_var.val, 'value')

    def test_var_with_encryption_rotate_fernet_key(self):
        """
        Tests rotating encrypted variables.
        """
        key1 = Fernet.generate_key()
        key2 = Fernet.generate_key()

        with conf_vars({('core', 'fernet_key'): key1.decode()}):
            Variable.set('key', 'value')
            session = settings.Session()
            test_var = session.query(Variable).filter(Variable.key == 'key').one()
            self.assertTrue(test_var.is_encrypted)
            self.assertEqual(test_var.val, 'value')
            self.assertEqual(Fernet(key1).decrypt(test_var._val.encode()), b'value')

        # Test decrypt of old value with new key
        with conf_vars({('core', 'fernet_key'): ','.join([key2.decode(), key1.decode()])}):
            crypto._fernet = None
            self.assertEqual(test_var.val, 'value')

            # Test decrypt of new value with new key
            test_var.rotate_fernet_key()
            self.assertTrue(test_var.is_encrypted)
            self.assertEqual(test_var.val, 'value')
            self.assertEqual(Fernet(key2).decrypt(test_var._val.encode()), b'value')
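The rotation test above relies on multi-key decryption: Airflow assembles its Fernet object from the comma-separated key list and tries keys in order. The same decrypt-with-any, encrypt-with-first behavior can be demonstrated directly with cryptography's MultiFernet; a standalone sketch, independent of Airflow:

from cryptography.fernet import Fernet, MultiFernet

old_key, new_key = Fernet.generate_key(), Fernet.generate_key()
token = Fernet(old_key).encrypt(b'value')

# Listing the new key first still decrypts tokens made under the old key...
mf = MultiFernet([Fernet(new_key), Fernet(old_key)])
assert mf.decrypt(token) == b'value'

# ...and rotate() re-encrypts under the first (newest) key, which is what
# rotate_fernet_key() ultimately does for the stored value.
rotated = mf.rotate(token)
assert Fernet(new_key).decrypt(rotated) == b'value'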
<|file_name|>cpg_gene.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
'''
Purpose:
This script, using default values, determines and plots the CpG islands in
relation to a given feature "type" (e.g. "gene" or "mRNA") from a GFF file
which corresponds to the user-provided fasta file.<|fim▁hole|>
Note:
CpG Islands are determined by ObEx = (Observed CpG) / (Expected CpG) ,
default threshold > 1.
Where Expected CpG = (count(C) * count(G)) / WindowSize
Usage:
python cpg_gene.py FastaFile Gff_File OutFile.png
Default optional parameters:
-s, Step Size, default = 50
-w, Window Size, default = 200
-oe, Minimum Observed Expected CpG, default = 1
-gc, Minimum GC, default = .5
-r Range from ATG, or provided feature, default = 5000
-f, GFF Feature, default = "gene"
-i, Gene ID from GFF, default = ""
'''
import sys
import os
import argparse
from collections import Counter
from Bio import SeqIO
import cpgmod
import gffutils
import pandas as pd
import numpy as np
from ggplot import *
# Capture command line args, with or without defaults
if __name__ == '__main__':
# Parse the arguments
LineArgs = cpgmod.parseArguments()
# Populate vars with args
FastaFile = LineArgs.FastaFile
GffFile = LineArgs.GffFile
OutFile = LineArgs.FileOut
Step = LineArgs.s
WinSize = LineArgs.w
ObExthresh = LineArgs.oe
GCthresh = LineArgs.gc
StartRange = LineArgs.r
FeatGFF = LineArgs.f
ID_Feat = LineArgs.i
# Gather all possible CpG islands
MergedRecs = []
print "Parsing sequences...\n"
for SeqRecord in SeqIO.parse(FastaFile, "fasta"):
print SeqRecord.id
# Determine if sequences and args are acceptable
cpgmod.arg_seqcheck(SeqRecord, WinSize, Step)
# Pre-determine number of islands
NumOfChunks = cpgmod.chunks(SeqRecord, WinSize, Step)
# Return array of SeqRec class (potential CpG island) instances
SeqRecList = cpgmod.compute(SeqRecord, Step, NumOfChunks, WinSize)
MergedRecs = MergedRecs + SeqRecList
# Create GFF DB
GffDb = gffutils.create_db(GffFile, dbfn='GFF.db', force=True, keep_order=True,
merge_strategy='merge', sort_attribute_values=True,
disable_infer_transcripts=True,
disable_infer_genes=True)
print "\nGFF Database Created...\n"
# Filter out SeqRec below threshold
DistArr = []
for Rec in MergedRecs:
Cond1 = Rec.expect() > 0
if Cond1 == True:
ObEx = (Rec.observ() / Rec.expect())
Cond2 = ObEx > ObExthresh
Cond3 = Rec.gc_cont() > GCthresh
if Cond2 and Cond3:
# Query GFF DB for closest gene feature *or provided feature*
Arr = cpgmod.get_closest(Rec, GffDb, StartRange, FeatGFF, ID_Feat)
if Arr <> False:
Arr.append(ObEx)
DistArr.append(Arr)
print "CpG Islands predicted...\n"
print "Generating Figure...\n"
# Releasing SeqRecs
MergedRecs = None
SeqRecList = None
# Pre-check DistArr Results
if len(DistArr) < 2:
print "WARNING, "+ str(len(DistArr)) + " sites were found."
print "Consider changing parameters.\n"
# Generate Figure:
ObExRes = pd.DataFrame({
'gene' : [],
'xval': [],
'yval': []})
try:
Cnt = 0
for Dist in DistArr:
Cnt += 1
print "PROGRESS: "+str(Cnt) +" of "+ str(len(DistArr))
ObExdf = pd.DataFrame({
'gene': [Dist[2]],
'xval': [Dist[1]],
'yval': [Dist[3]]})
ObExFram = [ObExRes, ObExdf]
ObExRes = pd.concat(ObExFram, ignore_index=True)
p = ggplot(aes(x='xval', y='yval'), data=ObExRes) \
+ geom_point() \
+ ylab("Observed/Expected CpG") \
+ xlab("Position (bp) Relative to (ATG = 0)") \
+ ggtitle("Predicted CpG Island Position Relative to ATG")
p.save(OutFile)
except IndexError as e:
print 'Error: '+ str(e)
sys.exit('Exiting script...')
print p
# Remove GFF DB
os.remove('GFF.db')<|fim▁end|> | |
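The ObEx statistic above is straightforward to compute directly. The sketch below illustrates the windowed calculation from the docstring (Observed CpG against Expected CpG = (count(C) * count(G)) / WindowSize) on a plain Python string; it is a simplified stand-in for what cpgmod.compute does per window, not the cpgmod implementation itself.
def window_obex(seq, start, win_size=200):
    # Count observed CpG dinucleotides in the window and compare against
    # the expectation (count(C) * count(G)) / WindowSize from the docstring.
    window = seq[start:start + win_size].upper()
    observed = window.count("CG")
    expected = (window.count("C") * window.count("G")) / float(win_size)
    gc_content = (window.count("C") + window.count("G")) / float(win_size)
    obex = observed / expected if expected > 0 else 0.0
    return obex, gc_content

# A CpG-rich stretch passes both default thresholds (ObEx > 1, GC > .5).
print(window_obex("CGCGGCATCGGCGCTA" * 13, 0))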
<|file_name|>cpg_gene.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
'''
Purpose:
This script, using default values, determines and plots the CpG islands in
relation to a given feature "type" (e.g. "gene" or "mRNA") from a GFF file
which corresponds to the user-provided fasta file.
Note:
CpG Islands are determined by ObEx = (Observed CpG) / (Expected CpG) ,
default threshold > 1.
Where Expected CpG = (count(C) * count(G)) / WindowSize
Usage:
python cpg_gene.py FastaFile Gff_File OutFile.png
Default optional parameters:
-s, Step Size, default = 50
-w, Window Size, default = 200
-oe, Minimum Observed Expected CpG, default = 1
-gc, Minimum GC, default = .5
    -r, Range from ATG, or provided feature, default = 5000
-f, GFF Feature, default = "gene"
-i, Gene ID from GFF, default = ""
'''
import sys
import os
import argparse
from collections import Counter
from Bio import SeqIO
import cpgmod
import gffutils
import pandas as pd
import numpy as np
from ggplot import *
# Capture command line args, with or without defaults
if __name__ == '__main__':
# Parse the arguments
<|fim_middle|>
# Populate vars with args
FastaFile = LineArgs.FastaFile
GffFile = LineArgs.GffFile
OutFile = LineArgs.FileOut
Step = LineArgs.s
WinSize = LineArgs.w
ObExthresh = LineArgs.oe
GCthresh = LineArgs.gc
StartRange = LineArgs.r
FeatGFF = LineArgs.f
ID_Feat = LineArgs.i
# Gather all possible CpG islands
MergedRecs = []
print "Parsing sequences...\n"
for SeqRecord in SeqIO.parse(FastaFile, "fasta"):
print SeqRecord.id
# Determine if sequences and args are acceptable
cpgmod.arg_seqcheck(SeqRecord, WinSize, Step)
# Pre-determine number of islands
NumOfChunks = cpgmod.chunks(SeqRecord, WinSize, Step)
# Return array of SeqRec class (potential CpG island) instances
SeqRecList = cpgmod.compute(SeqRecord, Step, NumOfChunks, WinSize)
MergedRecs = MergedRecs + SeqRecList
# Create GFF DB
GffDb = gffutils.create_db(GffFile, dbfn='GFF.db', force=True, keep_order=True,
merge_strategy='merge', sort_attribute_values=True,
disable_infer_transcripts=True,
disable_infer_genes=True)
print "\nGFF Database Created...\n"
# Filter out SeqRec below threshold
DistArr = []
for Rec in MergedRecs:
Cond1 = Rec.expect() > 0
if Cond1 == True:
ObEx = (Rec.observ() / Rec.expect())
Cond2 = ObEx > ObExthresh
Cond3 = Rec.gc_cont() > GCthresh
if Cond2 and Cond3:
# Query GFF DB for closest gene feature *or provided feature*
Arr = cpgmod.get_closest(Rec, GffDb, StartRange, FeatGFF, ID_Feat)
                if Arr != False:
Arr.append(ObEx)
DistArr.append(Arr)
print "CpG Islands predicted...\n"
print "Generating Figure...\n"
# Releasing SeqRecs
MergedRecs = None
SeqRecList = None
# Pre-check DistArr Results
if len(DistArr) < 2:
print "WARNING, "+ str(len(DistArr)) + " sites were found."
print "Consider changing parameters.\n"
# Generate Figure:
ObExRes = pd.DataFrame({
'gene' : [],
'xval': [],
'yval': []})
try:
Cnt = 0
for Dist in DistArr:
Cnt += 1
print "PROGRESS: "+str(Cnt) +" of "+ str(len(DistArr))
ObExdf = pd.DataFrame({
'gene': [Dist[2]],
'xval': [Dist[1]],
'yval': [Dist[3]]})
ObExFram = [ObExRes, ObExdf]
ObExRes = pd.concat(ObExFram, ignore_index=True)
p = ggplot(aes(x='xval', y='yval'), data=ObExRes) \
+ geom_point() \
+ ylab("Observed/Expected CpG") \
+ xlab("Position (bp) Relative to (ATG = 0)") \
+ ggtitle("Predicted CpG Island Position Relative to ATG")
p.save(OutFile)
except IndexError as e:
print 'Error: '+ str(e)
sys.exit('Exiting script...')
print p
# Remove GFF DB
os.remove('GFF.db')
<|fim▁end|> | LineArgs = cpgmod.parseArguments() |
<|file_name|>cpg_gene.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
'''
Purpose:
This script, using default values, determines and plots the CpG islands in
relation to a given feature "type" (e.g. "gene" or "mRNA") from a GFF file
which corresponds to the user-provided fasta file.
Note:
CpG Islands are determined by ObEx = (Observed CpG) / (Expected CpG) ,
default threshold > 1.
Where Expected CpG = (count(C) * count(G)) / WindowSize
Usage:
python cpg_gene.py FastaFile Gff_File OutFile.png
Default optional parameters:
-s, Step Size, default = 50
-w, Window Size, default = 200
-oe, Minimum Observed Expected CpG, default = 1
-gc, Minimum GC, default = .5
    -r, Range from ATG, or provided feature, default = 5000
-f, GFF Feature, default = "gene"
-i, Gene ID from GFF, default = ""
'''
import sys
import os
import argparse
from collections import Counter
from Bio import SeqIO
import cpgmod
import gffutils
import pandas as pd
import numpy as np
from ggplot import *
# Capture command line args, with or without defaults
if __name__ == '__main__':
# Parse the arguments
LineArgs = cpgmod.parseArguments()
# Populate vars with args
FastaFile = LineArgs.FastaFile
GffFile = LineArgs.GffFile
OutFile = LineArgs.FileOut
Step = LineArgs.s
WinSize = LineArgs.w
ObExthresh = LineArgs.oe
GCthresh = LineArgs.gc
StartRange = LineArgs.r
FeatGFF = LineArgs.f
ID_Feat = LineArgs.i
# Gather all possible CpG islands
MergedRecs = []
print "Parsing sequences...\n"
for SeqRecord in SeqIO.parse(FastaFile, "fasta"):
print SeqRecord.id
# Determine if sequences and args are acceptable
cpgmod.arg_seqcheck(SeqRecord, WinSize, Step)
# Pre-determine number of islands
NumOfChunks = cpgmod.chunks(SeqRecord, WinSize, Step)
# Return array of SeqRec class (potential CpG island) instances
SeqRecList = cpgmod.compute(SeqRecord, Step, NumOfChunks, WinSize)
MergedRecs = MergedRecs + SeqRecList
# Create GFF DB
GffDb = gffutils.create_db(GffFile, dbfn='GFF.db', force=True, keep_order=True,
merge_strategy='merge', sort_attribute_values=True,
disable_infer_transcripts=True,
disable_infer_genes=True)
print "\nGFF Database Created...\n"
# Filter out SeqRec below threshold
DistArr = []
for Rec in MergedRecs:
Cond1 = Rec.expect() > 0
if Cond1 == True:
<|fim_middle|>
print "CpG Islands predicted...\n"
print "Generating Figure...\n"
# Releasing SeqRecs
MergedRecs = None
SeqRecList = None
# Pre-check DistArr Results
if len(DistArr) < 2:
print "WARNING, "+ str(len(DistArr)) + " sites were found."
print "Consider changing parameters.\n"
# Generate Figure:
ObExRes = pd.DataFrame({
'gene' : [],
'xval': [],
'yval': []})
try:
Cnt = 0
for Dist in DistArr:
Cnt += 1
print "PROGRESS: "+str(Cnt) +" of "+ str(len(DistArr))
ObExdf = pd.DataFrame({
'gene': [Dist[2]],
'xval': [Dist[1]],
'yval': [Dist[3]]})
ObExFram = [ObExRes, ObExdf]
ObExRes = pd.concat(ObExFram, ignore_index=True)
p = ggplot(aes(x='xval', y='yval'), data=ObExRes) \
+ geom_point() \
+ ylab("Observed/Expected CpG") \
+ xlab("Position (bp) Relative to (ATG = 0)") \
+ ggtitle("Predicted CpG Island Position Relative to ATG")
p.save(OutFile)
except IndexError as e:
print 'Error: '+ str(e)
sys.exit('Exiting script...')
print p
# Remove GFF DB
os.remove('GFF.db')
<|fim▁end|> | ObEx = (Rec.observ() / Rec.expect())
Cond2 = ObEx > ObExthresh
Cond3 = Rec.gc_cont() > GCthresh
if Cond2 and Cond3:
# Query GFF DB for closest gene feature *or provided feature*
Arr = cpgmod.get_closest(Rec, GffDb, StartRange, FeatGFF, ID_Feat)
                if Arr != False:
Arr.append(ObEx)
DistArr.append(Arr) |
<|file_name|>cpg_gene.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
'''
Purpose:
This script, using default values, determines and plots the CpG islands in
relation to a given feature "type" (e.g. "gene" or "mRNA") from a GFF file
which corresponds to the user-provided fasta file.
Note:
CpG Islands are determined by ObEx = (Observed CpG) / (Expected CpG) ,
default threshold > 1.
Where Expected CpG = (count(C) * count(G)) / WindowSize
Usage:
python cpg_gene.py FastaFile Gff_File OutFile.png
Default optional parameters:
-s, Step Size, default = 50
-w, Window Size, default = 200
-oe, Minimum Observed Expected CpG, default = 1
-gc, Minimum GC, default = .5
    -r, Range from ATG, or provided feature, default = 5000
-f, GFF Feature, default = "gene"
-i, Gene ID from GFF, default = ""
'''
import sys
import os
import argparse
from collections import Counter
from Bio import SeqIO
import cpgmod
import gffutils
import pandas as pd
import numpy as np
from ggplot import *
# Capture command line args, with or without defaults
if __name__ == '__main__':
# Parse the arguments
LineArgs = cpgmod.parseArguments()
# Populate vars with args
FastaFile = LineArgs.FastaFile
GffFile = LineArgs.GffFile
OutFile = LineArgs.FileOut
Step = LineArgs.s
WinSize = LineArgs.w
ObExthresh = LineArgs.oe
GCthresh = LineArgs.gc
StartRange = LineArgs.r
FeatGFF = LineArgs.f
ID_Feat = LineArgs.i
# Gather all possible CpG islands
MergedRecs = []
print "Parsing sequences...\n"
for SeqRecord in SeqIO.parse(FastaFile, "fasta"):
print SeqRecord.id
# Determine if sequences and args are acceptable
cpgmod.arg_seqcheck(SeqRecord, WinSize, Step)
# Pre-determine number of islands
NumOfChunks = cpgmod.chunks(SeqRecord, WinSize, Step)
# Return array of SeqRec class (potential CpG island) instances
SeqRecList = cpgmod.compute(SeqRecord, Step, NumOfChunks, WinSize)
MergedRecs = MergedRecs + SeqRecList
# Create GFF DB
GffDb = gffutils.create_db(GffFile, dbfn='GFF.db', force=True, keep_order=True,
merge_strategy='merge', sort_attribute_values=True,
disable_infer_transcripts=True,
disable_infer_genes=True)
print "\nGFF Database Created...\n"
# Filter out SeqRec below threshold
DistArr = []
for Rec in MergedRecs:
Cond1 = Rec.expect() > 0
if Cond1 == True:
ObEx = (Rec.observ() / Rec.expect())
Cond2 = ObEx > ObExthresh
Cond3 = Rec.gc_cont() > GCthresh
if Cond2 and Cond3:
# Query GFF DB for closest gene feature *or provided feature*
<|fim_middle|>
print "CpG Islands predicted...\n"
print "Generating Figure...\n"
# Releasing SeqRecs
MergedRecs = None
SeqRecList = None
# Pre-check DistArr Results
if len(DistArr) < 2:
print "WARNING, "+ str(len(DistArr)) + " sites were found."
print "Consider changing parameters.\n"
# Generate Figure:
ObExRes = pd.DataFrame({
'gene' : [],
'xval': [],
'yval': []})
try:
Cnt = 0
for Dist in DistArr:
Cnt += 1
print "PROGRESS: "+str(Cnt) +" of "+ str(len(DistArr))
ObExdf = pd.DataFrame({
'gene': [Dist[2]],
'xval': [Dist[1]],
'yval': [Dist[3]]})
ObExFram = [ObExRes, ObExdf]
ObExRes = pd.concat(ObExFram, ignore_index=True)
p = ggplot(aes(x='xval', y='yval'), data=ObExRes) \
+ geom_point() \
+ ylab("Observed/Expected CpG") \
+ xlab("Position (bp) Relative to (ATG = 0)") \
+ ggtitle("Predicted CpG Island Position Relative to ATG")
p.save(OutFile)
except IndexError as e:
print 'Error: '+ str(e)
sys.exit('Exiting script...')
print p
# Remove GFF DB
os.remove('GFF.db')
<|fim▁end|> | Arr = cpgmod.get_closest(Rec, GffDb, StartRange, FeatGFF, ID_Feat)
                if Arr != False:
Arr.append(ObEx)
DistArr.append(Arr) |
<|file_name|>cpg_gene.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
'''
Purpose:
This script, using default values, determines and plots the CpG islands in
relation to a given feature "type" (e.g. "gene" or "mRNA") from a GFF file
which corresponds to the user-provided fasta file.
Note:
CpG Islands are determined by ObEx = (Observed CpG) / (Expected CpG) ,
default threshold > 1.
Where Expected CpG = (count(C) * count(G)) / WindowSize
Usage:
python cpg_gene.py FastaFile Gff_File OutFile.png
Default optional parameters:
-s, Step Size, default = 50
-w, Window Size, default = 200
-oe, Minimum Observed Expected CpG, default = 1
-gc, Minimum GC, default = .5
    -r, Range from ATG, or provided feature, default = 5000
-f, GFF Feature, default = "gene"
-i, Gene ID from GFF, default = ""
'''
import sys
import os
import argparse
from collections import Counter
from Bio import SeqIO
import cpgmod
import gffutils
import pandas as pd
import numpy as np
from ggplot import *
# Capture command line args, with or without defaults
if __name__ == '__main__':
# Parse the arguments
LineArgs = cpgmod.parseArguments()
# Populate vars with args
FastaFile = LineArgs.FastaFile
GffFile = LineArgs.GffFile
OutFile = LineArgs.FileOut
Step = LineArgs.s
WinSize = LineArgs.w
ObExthresh = LineArgs.oe
GCthresh = LineArgs.gc
StartRange = LineArgs.r
FeatGFF = LineArgs.f
ID_Feat = LineArgs.i
# Gather all possible CpG islands
MergedRecs = []
print "Parsing sequences...\n"
for SeqRecord in SeqIO.parse(FastaFile, "fasta"):
print SeqRecord.id
# Determine if sequences and args are acceptable
cpgmod.arg_seqcheck(SeqRecord, WinSize, Step)
# Pre-determine number of islands
NumOfChunks = cpgmod.chunks(SeqRecord, WinSize, Step)
# Return array of SeqRec class (potential CpG island) instances
SeqRecList = cpgmod.compute(SeqRecord, Step, NumOfChunks, WinSize)
MergedRecs = MergedRecs + SeqRecList
# Create GFF DB
GffDb = gffutils.create_db(GffFile, dbfn='GFF.db', force=True, keep_order=True,
merge_strategy='merge', sort_attribute_values=True,
disable_infer_transcripts=True,
disable_infer_genes=True)
print "\nGFF Database Created...\n"
# Filter out SeqRec below threshold
DistArr = []
for Rec in MergedRecs:
Cond1 = Rec.expect() > 0
if Cond1 == True:
ObEx = (Rec.observ() / Rec.expect())
Cond2 = ObEx > ObExthresh
Cond3 = Rec.gc_cont() > GCthresh
if Cond2 and Cond3:
# Query GFF DB for closest gene feature *or provided feature*
Arr = cpgmod.get_closest(Rec, GffDb, StartRange, FeatGFF, ID_Feat)
                if Arr != False:
<|fim_middle|>
print "CpG Islands predicted...\n"
print "Generating Figure...\n"
# Releasing SeqRecs
MergedRecs = None
SeqRecList = None
# Pre-check DistArr Results
if len(DistArr) < 2:
print "WARNING, "+ str(len(DistArr)) + " sites were found."
print "Consider changing parameters.\n"
# Generate Figure:
ObExRes = pd.DataFrame({
'gene' : [],
'xval': [],
'yval': []})
try:
Cnt = 0
for Dist in DistArr:
Cnt += 1
print "PROGRESS: "+str(Cnt) +" of "+ str(len(DistArr))
ObExdf = pd.DataFrame({
'gene': [Dist[2]],
'xval': [Dist[1]],
'yval': [Dist[3]]})
ObExFram = [ObExRes, ObExdf]
ObExRes = pd.concat(ObExFram, ignore_index=True)
p = ggplot(aes(x='xval', y='yval'), data=ObExRes) \
+ geom_point() \
+ ylab("Observed/Expected CpG") \
+ xlab("Position (bp) Relative to (ATG = 0)") \
+ ggtitle("Predicted CpG Island Position Relative to ATG")
p.save(OutFile)
except IndexError as e:
print 'Error: '+ str(e)
sys.exit('Exiting script...')
print p
# Remove GFF DB
os.remove('GFF.db')
<|fim▁end|> | Arr.append(ObEx)
DistArr.append(Arr) |
<|file_name|>cpg_gene.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
'''
Purpose:
This script, using default values, determines and plots the CpG islands in
relation to a given feature "type" (e.g. "gene" or "mRNA") from a GFF file
which corresponds to the user-provided fasta file.
Note:
CpG Islands are determined by ObEx = (Observed CpG) / (Expected CpG) ,
default threshold > 1.
Where Expected CpG = (count(C) * count(G)) / WindowSize
Usage:
python cpg_gene.py FastaFile Gff_File OutFile.png
Default optional parameters:
-s, Step Size, default = 50
-w, Window Size, default = 200
-oe, Minimum Observed Expected CpG, default = 1
-gc, Minimum GC, default = .5
    -r, Range from ATG, or provided feature, default = 5000
-f, GFF Feature, default = "gene"
-i, Gene ID from GFF, default = ""
'''
import sys
import os
import argparse
from collections import Counter
from Bio import SeqIO
import cpgmod
import gffutils
import pandas as pd
import numpy as np
from ggplot import *
# Capture command line args, with or without defaults
if __name__ == '__main__':
# Parse the arguments
LineArgs = cpgmod.parseArguments()
# Populate vars with args
FastaFile = LineArgs.FastaFile
GffFile = LineArgs.GffFile
OutFile = LineArgs.FileOut
Step = LineArgs.s
WinSize = LineArgs.w
ObExthresh = LineArgs.oe
GCthresh = LineArgs.gc
StartRange = LineArgs.r
FeatGFF = LineArgs.f
ID_Feat = LineArgs.i
# Gather all possible CpG islands
MergedRecs = []
print "Parsing sequences...\n"
for SeqRecord in SeqIO.parse(FastaFile, "fasta"):
print SeqRecord.id
# Determine if sequences and args are acceptable
cpgmod.arg_seqcheck(SeqRecord, WinSize, Step)
# Pre-determine number of islands
NumOfChunks = cpgmod.chunks(SeqRecord, WinSize, Step)
# Return array of SeqRec class (potential CpG island) instances
SeqRecList = cpgmod.compute(SeqRecord, Step, NumOfChunks, WinSize)
MergedRecs = MergedRecs + SeqRecList
# Create GFF DB
GffDb = gffutils.create_db(GffFile, dbfn='GFF.db', force=True, keep_order=True,
merge_strategy='merge', sort_attribute_values=True,
disable_infer_transcripts=True,
disable_infer_genes=True)
print "\nGFF Database Created...\n"
# Filter out SeqRec below threshold
DistArr = []
for Rec in MergedRecs:
Cond1 = Rec.expect() > 0
if Cond1 == True:
ObEx = (Rec.observ() / Rec.expect())
Cond2 = ObEx > ObExthresh
Cond3 = Rec.gc_cont() > GCthresh
if Cond2 and Cond3:
# Query GFF DB for closest gene feature *or provided feature*
Arr = cpgmod.get_closest(Rec, GffDb, StartRange, FeatGFF, ID_Feat)
                if Arr != False:
Arr.append(ObEx)
DistArr.append(Arr)
print "CpG Islands predicted...\n"
print "Generating Figure...\n"
# Releasing SeqRecs
MergedRecs = None
SeqRecList = None
# Pre-check DistArr Results
if len(DistArr) < 2:
<|fim_middle|>
# Generate Figure:
ObExRes = pd.DataFrame({
'gene' : [],
'xval': [],
'yval': []})
try:
Cnt = 0
for Dist in DistArr:
Cnt += 1
print "PROGRESS: "+str(Cnt) +" of "+ str(len(DistArr))
ObExdf = pd.DataFrame({
'gene': [Dist[2]],
'xval': [Dist[1]],
'yval': [Dist[3]]})
ObExFram = [ObExRes, ObExdf]
ObExRes = pd.concat(ObExFram, ignore_index=True)
p = ggplot(aes(x='xval', y='yval'), data=ObExRes) \
+ geom_point() \
+ ylab("Observed/Expected CpG") \
+ xlab("Position (bp) Relative to (ATG = 0)") \
+ ggtitle("Predicted CpG Island Position Relative to ATG")
p.save(OutFile)
except IndexError as e:
print 'Error: '+ str(e)
sys.exit('Exiting script...')
print p
# Remove GFF DB
os.remove('GFF.db')
<|fim▁end|> | print "WARNING, "+ str(len(DistArr)) + " sites were found."
print "Consider changing parameters.\n" |
<|file_name|>syntax_iterators.py<|end_file_name|><|fim▁begin|>from typing import Union, Iterator
from ...symbols import NOUN, PROPN, PRON
from ...errors import Errors
from ...tokens import Doc, Span
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Span]:
"""
Detect base noun phrases from a dependency parse. Works on both Doc and Span.<|fim▁hole|> """
# fmt: off
labels = ["nsubj", "nsubj:pass", "obj", "iobj", "ROOT", "appos", "nmod", "nmod:poss"]
# fmt: on
doc = doclike.doc # Ensure works on both Doc and Span.
if not doc.has_annotation("DEP"):
raise ValueError(Errors.E029)
np_deps = [doc.vocab.strings[label] for label in labels]
conj = doc.vocab.strings.add("conj")
np_label = doc.vocab.strings.add("NP")
prev_end = -1
for i, word in enumerate(doclike):
if word.pos not in (NOUN, PROPN, PRON):
continue
# Prevent nested chunks from being produced
if word.left_edge.i <= prev_end:
continue
if word.dep in np_deps:
prev_end = word.right_edge.i
yield word.left_edge.i, word.right_edge.i + 1, np_label
elif word.dep == conj:
head = word.head
while head.dep == conj and head.head.i < head.i:
head = head.head
# If the head is an NP, and we're coordinated to it, we're an NP
if head.dep in np_deps:
prev_end = word.right_edge.i
yield word.left_edge.i, word.right_edge.i + 1, np_label
SYNTAX_ITERATORS = {"noun_chunks": noun_chunks}<|fim▁end|> | |
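Once a language registers this iterator in its Language defaults, spaCy converts the (start, end, label) triples yielded above into Spans behind the doc.noun_chunks property. A usage sketch follows, assuming this module is wired into a UD-style pipeline; fr_core_news_sm is one model whose label set matches the list above, but substitute whichever pipeline actually ships this iterator.
import spacy

# The doc must carry a dependency parse ("DEP"), otherwise noun_chunks raises.
nlp = spacy.load("fr_core_news_sm")
doc = nlp("Le chat noir dort sur le canapé.")
for chunk in doc.noun_chunks:
    # Each chunk is a Span built from one (left_edge.i, right_edge.i + 1, NP) triple.
    print(chunk.text, chunk.root.text, chunk.root.dep_)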
<|file_name|>syntax_iterators.py<|end_file_name|><|fim▁begin|>from typing import Union, Iterator
from ...symbols import NOUN, PROPN, PRON
from ...errors import Errors
from ...tokens import Doc, Span
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Span]:
<|fim_middle|>
SYNTAX_ITERATORS = {"noun_chunks": noun_chunks}
<|fim▁end|> | """
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
"""
# fmt: off
labels = ["nsubj", "nsubj:pass", "obj", "iobj", "ROOT", "appos", "nmod", "nmod:poss"]
# fmt: on
doc = doclike.doc # Ensure works on both Doc and Span.
if not doc.has_annotation("DEP"):
raise ValueError(Errors.E029)
np_deps = [doc.vocab.strings[label] for label in labels]
conj = doc.vocab.strings.add("conj")
np_label = doc.vocab.strings.add("NP")
prev_end = -1
for i, word in enumerate(doclike):
if word.pos not in (NOUN, PROPN, PRON):
continue
# Prevent nested chunks from being produced
if word.left_edge.i <= prev_end:
continue
if word.dep in np_deps:
prev_end = word.right_edge.i
yield word.left_edge.i, word.right_edge.i + 1, np_label
elif word.dep == conj:
head = word.head
while head.dep == conj and head.head.i < head.i:
head = head.head
# If the head is an NP, and we're coordinated to it, we're an NP
if head.dep in np_deps:
prev_end = word.right_edge.i
yield word.left_edge.i, word.right_edge.i + 1, np_label |
<|file_name|>syntax_iterators.py<|end_file_name|><|fim▁begin|>from typing import Union, Iterator
from ...symbols import NOUN, PROPN, PRON
from ...errors import Errors
from ...tokens import Doc, Span
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Span]:
"""
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
"""
# fmt: off
labels = ["nsubj", "nsubj:pass", "obj", "iobj", "ROOT", "appos", "nmod", "nmod:poss"]
# fmt: on
doc = doclike.doc # Ensure works on both Doc and Span.
if not doc.has_annotation("DEP"):
<|fim_middle|>
np_deps = [doc.vocab.strings[label] for label in labels]
conj = doc.vocab.strings.add("conj")
np_label = doc.vocab.strings.add("NP")
prev_end = -1
for i, word in enumerate(doclike):
if word.pos not in (NOUN, PROPN, PRON):
continue
# Prevent nested chunks from being produced
if word.left_edge.i <= prev_end:
continue
if word.dep in np_deps:
prev_end = word.right_edge.i
yield word.left_edge.i, word.right_edge.i + 1, np_label
elif word.dep == conj:
head = word.head
while head.dep == conj and head.head.i < head.i:
head = head.head
# If the head is an NP, and we're coordinated to it, we're an NP
if head.dep in np_deps:
prev_end = word.right_edge.i
yield word.left_edge.i, word.right_edge.i + 1, np_label
SYNTAX_ITERATORS = {"noun_chunks": noun_chunks}
<|fim▁end|> | raise ValueError(Errors.E029) |
<|file_name|>syntax_iterators.py<|end_file_name|><|fim▁begin|>from typing import Union, Iterator
from ...symbols import NOUN, PROPN, PRON
from ...errors import Errors
from ...tokens import Doc, Span
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Span]:
"""
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
"""
# fmt: off
labels = ["nsubj", "nsubj:pass", "obj", "iobj", "ROOT", "appos", "nmod", "nmod:poss"]
# fmt: on
doc = doclike.doc # Ensure works on both Doc and Span.
if not doc.has_annotation("DEP"):
raise ValueError(Errors.E029)
np_deps = [doc.vocab.strings[label] for label in labels]
conj = doc.vocab.strings.add("conj")
np_label = doc.vocab.strings.add("NP")
prev_end = -1
for i, word in enumerate(doclike):
if word.pos not in (NOUN, PROPN, PRON):
<|fim_middle|>
# Prevent nested chunks from being produced
if word.left_edge.i <= prev_end:
continue
if word.dep in np_deps:
prev_end = word.right_edge.i
yield word.left_edge.i, word.right_edge.i + 1, np_label
elif word.dep == conj:
head = word.head
while head.dep == conj and head.head.i < head.i:
head = head.head
# If the head is an NP, and we're coordinated to it, we're an NP
if head.dep in np_deps:
prev_end = word.right_edge.i
yield word.left_edge.i, word.right_edge.i + 1, np_label
SYNTAX_ITERATORS = {"noun_chunks": noun_chunks}
<|fim▁end|> | continue |
<|file_name|>syntax_iterators.py<|end_file_name|><|fim▁begin|>from typing import Union, Iterator
from ...symbols import NOUN, PROPN, PRON
from ...errors import Errors
from ...tokens import Doc, Span
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Span]:
"""
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
"""
# fmt: off
labels = ["nsubj", "nsubj:pass", "obj", "iobj", "ROOT", "appos", "nmod", "nmod:poss"]
# fmt: on
doc = doclike.doc # Ensure works on both Doc and Span.
if not doc.has_annotation("DEP"):
raise ValueError(Errors.E029)
np_deps = [doc.vocab.strings[label] for label in labels]
conj = doc.vocab.strings.add("conj")
np_label = doc.vocab.strings.add("NP")
prev_end = -1
for i, word in enumerate(doclike):
if word.pos not in (NOUN, PROPN, PRON):
continue
# Prevent nested chunks from being produced
if word.left_edge.i <= prev_end:
<|fim_middle|>
if word.dep in np_deps:
prev_end = word.right_edge.i
yield word.left_edge.i, word.right_edge.i + 1, np_label
elif word.dep == conj:
head = word.head
while head.dep == conj and head.head.i < head.i:
head = head.head
# If the head is an NP, and we're coordinated to it, we're an NP
if head.dep in np_deps:
prev_end = word.right_edge.i
yield word.left_edge.i, word.right_edge.i + 1, np_label
SYNTAX_ITERATORS = {"noun_chunks": noun_chunks}
<|fim▁end|> | continue |
<|file_name|>syntax_iterators.py<|end_file_name|><|fim▁begin|>from typing import Union, Iterator
from ...symbols import NOUN, PROPN, PRON
from ...errors import Errors
from ...tokens import Doc, Span
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Span]:
"""
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
"""
# fmt: off
labels = ["nsubj", "nsubj:pass", "obj", "iobj", "ROOT", "appos", "nmod", "nmod:poss"]
# fmt: on
doc = doclike.doc # Ensure works on both Doc and Span.
if not doc.has_annotation("DEP"):
raise ValueError(Errors.E029)
np_deps = [doc.vocab.strings[label] for label in labels]
conj = doc.vocab.strings.add("conj")
np_label = doc.vocab.strings.add("NP")
prev_end = -1
for i, word in enumerate(doclike):
if word.pos not in (NOUN, PROPN, PRON):
continue
# Prevent nested chunks from being produced
if word.left_edge.i <= prev_end:
continue
if word.dep in np_deps:
<|fim_middle|>
elif word.dep == conj:
head = word.head
while head.dep == conj and head.head.i < head.i:
head = head.head
# If the head is an NP, and we're coordinated to it, we're an NP
if head.dep in np_deps:
prev_end = word.right_edge.i
yield word.left_edge.i, word.right_edge.i + 1, np_label
SYNTAX_ITERATORS = {"noun_chunks": noun_chunks}
<|fim▁end|> | prev_end = word.right_edge.i
yield word.left_edge.i, word.right_edge.i + 1, np_label |
<|file_name|>syntax_iterators.py<|end_file_name|><|fim▁begin|>from typing import Union, Iterator
from ...symbols import NOUN, PROPN, PRON
from ...errors import Errors
from ...tokens import Doc, Span
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Span]:
"""
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
"""
# fmt: off
labels = ["nsubj", "nsubj:pass", "obj", "iobj", "ROOT", "appos", "nmod", "nmod:poss"]
# fmt: on
doc = doclike.doc # Ensure works on both Doc and Span.
if not doc.has_annotation("DEP"):
raise ValueError(Errors.E029)
np_deps = [doc.vocab.strings[label] for label in labels]
conj = doc.vocab.strings.add("conj")
np_label = doc.vocab.strings.add("NP")
prev_end = -1
for i, word in enumerate(doclike):
if word.pos not in (NOUN, PROPN, PRON):
continue
# Prevent nested chunks from being produced
if word.left_edge.i <= prev_end:
continue
if word.dep in np_deps:
prev_end = word.right_edge.i
yield word.left_edge.i, word.right_edge.i + 1, np_label
elif word.dep == conj:
<|fim_middle|>
SYNTAX_ITERATORS = {"noun_chunks": noun_chunks}
<|fim▁end|> | head = word.head
while head.dep == conj and head.head.i < head.i:
head = head.head
# If the head is an NP, and we're coordinated to it, we're an NP
if head.dep in np_deps:
prev_end = word.right_edge.i
yield word.left_edge.i, word.right_edge.i + 1, np_label |
<|file_name|>syntax_iterators.py<|end_file_name|><|fim▁begin|>from typing import Union, Iterator
from ...symbols import NOUN, PROPN, PRON
from ...errors import Errors
from ...tokens import Doc, Span
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Span]:
"""
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
"""
# fmt: off
labels = ["nsubj", "nsubj:pass", "obj", "iobj", "ROOT", "appos", "nmod", "nmod:poss"]
# fmt: on
doc = doclike.doc # Ensure works on both Doc and Span.
if not doc.has_annotation("DEP"):
raise ValueError(Errors.E029)
np_deps = [doc.vocab.strings[label] for label in labels]
conj = doc.vocab.strings.add("conj")
np_label = doc.vocab.strings.add("NP")
prev_end = -1
for i, word in enumerate(doclike):
if word.pos not in (NOUN, PROPN, PRON):
continue
# Prevent nested chunks from being produced
if word.left_edge.i <= prev_end:
continue
if word.dep in np_deps:
prev_end = word.right_edge.i
yield word.left_edge.i, word.right_edge.i + 1, np_label
elif word.dep == conj:
head = word.head
while head.dep == conj and head.head.i < head.i:
head = head.head
# If the head is an NP, and we're coordinated to it, we're an NP
if head.dep in np_deps:
<|fim_middle|>
SYNTAX_ITERATORS = {"noun_chunks": noun_chunks}
<|fim▁end|> | prev_end = word.right_edge.i
yield word.left_edge.i, word.right_edge.i + 1, np_label |
<|file_name|>syntax_iterators.py<|end_file_name|><|fim▁begin|>from typing import Union, Iterator
from ...symbols import NOUN, PROPN, PRON
from ...errors import Errors
from ...tokens import Doc, Span
def <|fim_middle|>(doclike: Union[Doc, Span]) -> Iterator[Span]:
"""
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
"""
# fmt: off
labels = ["nsubj", "nsubj:pass", "obj", "iobj", "ROOT", "appos", "nmod", "nmod:poss"]
# fmt: on
doc = doclike.doc # Ensure works on both Doc and Span.
if not doc.has_annotation("DEP"):
raise ValueError(Errors.E029)
np_deps = [doc.vocab.strings[label] for label in labels]
conj = doc.vocab.strings.add("conj")
np_label = doc.vocab.strings.add("NP")
prev_end = -1
for i, word in enumerate(doclike):
if word.pos not in (NOUN, PROPN, PRON):
continue
# Prevent nested chunks from being produced
if word.left_edge.i <= prev_end:
continue
if word.dep in np_deps:
prev_end = word.right_edge.i
yield word.left_edge.i, word.right_edge.i + 1, np_label
elif word.dep == conj:
head = word.head
while head.dep == conj and head.head.i < head.i:
head = head.head
# If the head is an NP, and we're coordinated to it, we're an NP
if head.dep in np_deps:
prev_end = word.right_edge.i
yield word.left_edge.i, word.right_edge.i + 1, np_label
SYNTAX_ITERATORS = {"noun_chunks": noun_chunks}
<|fim▁end|> | noun_chunks |
<|file_name|>MedianFilter.py<|end_file_name|><|fim▁begin|>import numpy as np
from scipy.signal import medfilt
import manager.operations.method as method
from manager.operations.methodsteps.confirmation import Confirmation
from manager.exceptions import VoltPyNotAllowed
class MedianFilter(method.ProcessingMethod):
can_be_applied = True
_steps = [
{
'class': Confirmation,
'title': 'Apply median filter',
'desc': 'Press Forward to apply Median Filter.',
},
]
description = """
    Median filter is a smoothing algorithm similar to the Savitzky-Golay filter; however, instead of fitting a polynomial,<|fim▁hole|>
@classmethod
def __str__(cls):
return "Median Filter"
def apply(self, user, dataset):
if self.model.completed is not True:
raise VoltPyNotAllowed('Incomplete procedure.')
self.__perform(dataset)
def __perform(self, dataset):
for cd in dataset.curves_data.all():
yvec = cd.yVector
newyvec = medfilt(yvec)
dataset.updateCurve(self.model, cd, newyvec)
dataset.save()
def finalize(self, user):
self.__perform(self.model.dataset)
self.model.step = None
self.model.completed = True
self.model.save()
        return True<|fim▁end|> | the middle point of the window is moved to the median of the points in the window. The median filter is
    most useful for removal of spikes from the signal (single-point, large-amplitude errors).
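The spike-removal behaviour described in that docstring is easy to see on synthetic data; the snippet below uses the same scipy.signal.medfilt call as __perform, just outside the VoltPy models.
import numpy as np
from scipy.signal import medfilt

y = np.sin(np.linspace(0, 4 * np.pi, 200))
y[50] += 5.0   # single-point spike of large amplitude
y[120] -= 4.0  # a second outlier
smoothed = medfilt(y, kernel_size=5)
# The spikes collapse back onto the sine curve; smooth regions are barely changed.
print(y[49:52], smoothed[49:52])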
<|file_name|>MedianFilter.py<|end_file_name|><|fim▁begin|>import numpy as np
from scipy.signal import medfilt
import manager.operations.method as method
from manager.operations.methodsteps.confirmation import Confirmation
from manager.exceptions import VoltPyNotAllowed
class MedianFilter(method.ProcessingMethod):
<|fim_middle|>
<|fim▁end|> | can_be_applied = True
_steps = [
{
'class': Confirmation,
'title': 'Apply median filter',
'desc': 'Press Forward to apply Median Filter.',
},
]
description = """
    Median filter is a smoothing algorithm similar to the Savitzky-Golay filter; however, instead of fitting a polynomial,
    the middle point of the window is moved to the median of the points in the window. The median filter is
    most useful for removal of spikes from the signal (single-point, large-amplitude errors).
"""
@classmethod
def __str__(cls):
return "Median Filter"
def apply(self, user, dataset):
if self.model.completed is not True:
raise VoltPyNotAllowed('Incomplete procedure.')
self.__perform(dataset)
def __perform(self, dataset):
for cd in dataset.curves_data.all():
yvec = cd.yVector
newyvec = medfilt(yvec)
dataset.updateCurve(self.model, cd, newyvec)
dataset.save()
def finalize(self, user):
self.__perform(self.model.dataset)
self.model.step = None
self.model.completed = True
self.model.save()
return True |
<|file_name|>MedianFilter.py<|end_file_name|><|fim▁begin|>import numpy as np
from scipy.signal import medfilt
import manager.operations.method as method
from manager.operations.methodsteps.confirmation import Confirmation
from manager.exceptions import VoltPyNotAllowed
class MedianFilter(method.ProcessingMethod):
can_be_applied = True
_steps = [
{
'class': Confirmation,
'title': 'Apply median filter',
'desc': 'Press Forward to apply Median Filter.',
},
]
description = """
    Median filter is a smoothing algorithm similar to the Savitzky-Golay filter; however, instead of fitting a polynomial,
    the middle point of the window is moved to the median of the points in the window. The median filter is
    most useful for removal of spikes from the signal (single-point, large-amplitude errors).
"""
@classmethod
def __str__(cls):
<|fim_middle|>
def apply(self, user, dataset):
if self.model.completed is not True:
raise VoltPyNotAllowed('Incomplete procedure.')
self.__perform(dataset)
def __perform(self, dataset):
for cd in dataset.curves_data.all():
yvec = cd.yVector
newyvec = medfilt(yvec)
dataset.updateCurve(self.model, cd, newyvec)
dataset.save()
def finalize(self, user):
self.__perform(self.model.dataset)
self.model.step = None
self.model.completed = True
self.model.save()
return True
<|fim▁end|> | return "Median Filter" |
<|file_name|>MedianFilter.py<|end_file_name|><|fim▁begin|>import numpy as np
from scipy.signal import medfilt
import manager.operations.method as method
from manager.operations.methodsteps.confirmation import Confirmation
from manager.exceptions import VoltPyNotAllowed
class MedianFilter(method.ProcessingMethod):
can_be_applied = True
_steps = [
{
'class': Confirmation,
'title': 'Apply median filter',
'desc': 'Press Forward to apply Median Filter.',
},
]
description = """
    Median filter is a smoothing algorithm similar to the Savitzky-Golay filter; however, instead of fitting a polynomial,
    the middle point of the window is moved to the median of the points in the window. The median filter is
    most useful for removal of spikes from the signal (single-point, large-amplitude errors).
"""
@classmethod
def __str__(cls):
return "Median Filter"
def apply(self, user, dataset):
<|fim_middle|>
def __perform(self, dataset):
for cd in dataset.curves_data.all():
yvec = cd.yVector
newyvec = medfilt(yvec)
dataset.updateCurve(self.model, cd, newyvec)
dataset.save()
def finalize(self, user):
self.__perform(self.model.dataset)
self.model.step = None
self.model.completed = True
self.model.save()
return True
<|fim▁end|> | if self.model.completed is not True:
raise VoltPyNotAllowed('Incomplete procedure.')
self.__perform(dataset) |
<|file_name|>MedianFilter.py<|end_file_name|><|fim▁begin|>import numpy as np
from scipy.signal import medfilt
import manager.operations.method as method
from manager.operations.methodsteps.confirmation import Confirmation
from manager.exceptions import VoltPyNotAllowed
class MedianFilter(method.ProcessingMethod):
can_be_applied = True
_steps = [
{
'class': Confirmation,
'title': 'Apply median filter',
'desc': 'Press Forward to apply Median Filter.',
},
]
description = """
    Median filter is a smoothing algorithm similar to the Savitzky-Golay filter; however, instead of fitting a polynomial,
    the middle point of the window is moved to the median of the points in the window. The median filter is
    most useful for removal of spikes from the signal (single-point, large-amplitude errors).
"""
@classmethod
def __str__(cls):
return "Median Filter"
def apply(self, user, dataset):
if self.model.completed is not True:
raise VoltPyNotAllowed('Incomplete procedure.')
self.__perform(dataset)
def __perform(self, dataset):
<|fim_middle|>
def finalize(self, user):
self.__perform(self.model.dataset)
self.model.step = None
self.model.completed = True
self.model.save()
return True
<|fim▁end|> | for cd in dataset.curves_data.all():
yvec = cd.yVector
newyvec = medfilt(yvec)
dataset.updateCurve(self.model, cd, newyvec)
dataset.save() |
<|file_name|>MedianFilter.py<|end_file_name|><|fim▁begin|>import numpy as np
from scipy.signal import medfilt
import manager.operations.method as method
from manager.operations.methodsteps.confirmation import Confirmation
from manager.exceptions import VoltPyNotAllowed
class MedianFilter(method.ProcessingMethod):
can_be_applied = True
_steps = [
{
'class': Confirmation,
'title': 'Apply median filter',
'desc': 'Press Forward to apply Median Filter.',
},
]
description = """
    Median filter is a smoothing algorithm similar to the Savitzky-Golay filter; however, instead of fitting a polynomial,
    the middle point of the window is moved to the median of the points in the window. The median filter is
    most useful for removal of spikes from the signal (single-point, large-amplitude errors).
"""
@classmethod
def __str__(cls):
return "Median Filter"
def apply(self, user, dataset):
if self.model.completed is not True:
raise VoltPyNotAllowed('Incomplete procedure.')
self.__perform(dataset)
def __perform(self, dataset):
for cd in dataset.curves_data.all():
yvec = cd.yVector
newyvec = medfilt(yvec)
dataset.updateCurve(self.model, cd, newyvec)
dataset.save()
def finalize(self, user):
<|fim_middle|>
<|fim▁end|> | self.__perform(self.model.dataset)
self.model.step = None
self.model.completed = True
self.model.save()
return True |
<|file_name|>MedianFilter.py<|end_file_name|><|fim▁begin|>import numpy as np
from scipy.signal import medfilt
import manager.operations.method as method
from manager.operations.methodsteps.confirmation import Confirmation
from manager.exceptions import VoltPyNotAllowed
class MedianFilter(method.ProcessingMethod):
can_be_applied = True
_steps = [
{
'class': Confirmation,
'title': 'Apply median filter',
'desc': 'Press Forward to apply Median Filter.',
},
]
description = """
    Median filter is a smoothing algorithm similar to the Savitzky-Golay filter; however, instead of fitting a polynomial,
    the middle point of the window is moved to the median of the points in the window. The median filter is
    most useful for removal of spikes from the signal (single-point, large-amplitude errors).
"""
@classmethod
def __str__(cls):
return "Median Filter"
def apply(self, user, dataset):
if self.model.completed is not True:
<|fim_middle|>
self.__perform(dataset)
def __perform(self, dataset):
for cd in dataset.curves_data.all():
yvec = cd.yVector
newyvec = medfilt(yvec)
dataset.updateCurve(self.model, cd, newyvec)
dataset.save()
def finalize(self, user):
self.__perform(self.model.dataset)
self.model.step = None
self.model.completed = True
self.model.save()
return True
<|fim▁end|> | raise VoltPyNotAllowed('Incomplete procedure.') |
<|file_name|>MedianFilter.py<|end_file_name|><|fim▁begin|>import numpy as np
from scipy.signal import medfilt
import manager.operations.method as method
from manager.operations.methodsteps.confirmation import Confirmation
from manager.exceptions import VoltPyNotAllowed
class MedianFilter(method.ProcessingMethod):
can_be_applied = True
_steps = [
{
'class': Confirmation,
'title': 'Apply median filter',
'desc': 'Press Forward to apply Median Filter.',
},
]
description = """
    Median filter is a smoothing algorithm similar to the Savitzky-Golay filter; however, instead of fitting a polynomial,
    the middle point of the window is moved to the median of the points in the window. The median filter is
    most useful for removal of spikes from the signal (single-point, large-amplitude errors).
"""
@classmethod
def <|fim_middle|>(cls):
return "Median Filter"
def apply(self, user, dataset):
if self.model.completed is not True:
raise VoltPyNotAllowed('Incomplete procedure.')
self.__perform(dataset)
def __perform(self, dataset):
for cd in dataset.curves_data.all():
yvec = cd.yVector
newyvec = medfilt(yvec)
dataset.updateCurve(self.model, cd, newyvec)
dataset.save()
def finalize(self, user):
self.__perform(self.model.dataset)
self.model.step = None
self.model.completed = True
self.model.save()
return True
<|fim▁end|> | __str__ |
<|file_name|>MedianFilter.py<|end_file_name|><|fim▁begin|>import numpy as np
from scipy.signal import medfilt
import manager.operations.method as method
from manager.operations.methodsteps.confirmation import Confirmation
from manager.exceptions import VoltPyNotAllowed
class MedianFilter(method.ProcessingMethod):
can_be_applied = True
_steps = [
{
'class': Confirmation,
'title': 'Apply median filter',
'desc': 'Press Forward to apply Median Filter.',
},
]
description = """
    Median filter is a smoothing algorithm similar to the Savitzky-Golay filter; however, instead of fitting a polynomial,
    the middle point of the window is moved to the median of the points in the window. The median filter is
    most useful for removal of spikes from the signal (single-point, large-amplitude errors).
"""
@classmethod
def __str__(cls):
return "Median Filter"
def <|fim_middle|>(self, user, dataset):
if self.model.completed is not True:
raise VoltPyNotAllowed('Incomplete procedure.')
self.__perform(dataset)
def __perform(self, dataset):
for cd in dataset.curves_data.all():
yvec = cd.yVector
newyvec = medfilt(yvec)
dataset.updateCurve(self.model, cd, newyvec)
dataset.save()
def finalize(self, user):
self.__perform(self.model.dataset)
self.model.step = None
self.model.completed = True
self.model.save()
return True
<|fim▁end|> | apply |
<|file_name|>MedianFilter.py<|end_file_name|><|fim▁begin|>import numpy as np
from scipy.signal import medfilt
import manager.operations.method as method
from manager.operations.methodsteps.confirmation import Confirmation
from manager.exceptions import VoltPyNotAllowed
class MedianFilter(method.ProcessingMethod):
can_be_applied = True
_steps = [
{
'class': Confirmation,
'title': 'Apply median filter',
'desc': 'Press Forward to apply Median Filter.',
},
]
description = """
    Median filter is a smoothing algorithm similar to the Savitzky-Golay filter; however, instead of fitting a polynomial,
    the middle point of the window is moved to the median of the points in the window. The median filter is
    most useful for removal of spikes from the signal (single-point, large-amplitude errors).
"""
@classmethod
def __str__(cls):
return "Median Filter"
def apply(self, user, dataset):
if self.model.completed is not True:
raise VoltPyNotAllowed('Incomplete procedure.')
self.__perform(dataset)
def <|fim_middle|>(self, dataset):
for cd in dataset.curves_data.all():
yvec = cd.yVector
newyvec = medfilt(yvec)
dataset.updateCurve(self.model, cd, newyvec)
dataset.save()
def finalize(self, user):
self.__perform(self.model.dataset)
self.model.step = None
self.model.completed = True
self.model.save()
return True
<|fim▁end|> | __perform |
<|file_name|>MedianFilter.py<|end_file_name|><|fim▁begin|>import numpy as np
from scipy.signal import medfilt
import manager.operations.method as method
from manager.operations.methodsteps.confirmation import Confirmation
from manager.exceptions import VoltPyNotAllowed
class MedianFilter(method.ProcessingMethod):
can_be_applied = True
_steps = [
{
'class': Confirmation,
'title': 'Apply median filter',
'desc': 'Press Forward to apply Median Filter.',
},
]
description = """
    Median filter is a smoothing algorithm similar to the Savitzky-Golay filter; however, instead of fitting a polynomial,
    the middle point of the window is moved to the median of the points in the window. The median filter is
    most useful for removal of spikes from the signal (single-point, large-amplitude errors).
"""
@classmethod
def __str__(cls):
return "Median Filter"
def apply(self, user, dataset):
if self.model.completed is not True:
raise VoltPyNotAllowed('Incomplete procedure.')
self.__perform(dataset)
def __perform(self, dataset):
for cd in dataset.curves_data.all():
yvec = cd.yVector
newyvec = medfilt(yvec)
dataset.updateCurve(self.model, cd, newyvec)
dataset.save()
def <|fim_middle|>(self, user):
self.__perform(self.model.dataset)
self.model.step = None
self.model.completed = True
self.model.save()
return True
<|fim▁end|> | finalize |
<|file_name|>afip.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, models, api, _
from openerp.exceptions import Warning
import logging
_logger = logging.getLogger(__name__)
class afip_incoterm(models.Model):
_name = 'afip.incoterm'
_description = 'Afip Incoterm'
afip_code = fields.Char(
'Code', required=True)
name = fields.Char(
'Name', required=True)
class afip_point_of_sale(models.Model):
_name = 'afip.point_of_sale'
_description = 'Afip Point Of Sale'
prefix = fields.Char(
'Prefix'
)
sufix = fields.Char(
'Sufix'
)
type = fields.Selection([
('manual', 'Manual'),
('preprinted', 'Preprinted'),
('online', 'Online'),
        # Added by another module
# ('electronic', 'Electronic'),
# ('fiscal_printer', 'Fiscal Printer'),
],
'Type',
default='manual',
required=True,
)
name = fields.Char(
compute='get_name',
)
number = fields.Integer(
'Number', required=True
)
company_id = fields.Many2one(
'res.company', 'Company', required=True,
default=lambda self: self.env['res.company']._company_default_get(
'afip.point_of_sale')
)
journal_ids = fields.One2many(
'account.journal',
'point_of_sale_id',
'Journals',
)
document_sequence_type = fields.Selection(
[('own_sequence', 'Own Sequence'),
('same_sequence', 'Same Invoice Sequence')],
string='Document Sequence Type',
default='own_sequence',
required=True,
help="Use own sequence or invoice sequence on Debit and Credit Notes?"
)
journal_document_class_ids = fields.One2many(
'account.journal.afip_document_class',
compute='get_journal_document_class_ids',
string='Documents Classes',
)
@api.one
@api.depends('type', 'sufix', 'prefix', 'number')
def get_name(self):
        # TODO: improve this so it takes the translated label from the selection
if self.type == 'manual':
name = 'Manual'
elif self.type == 'preprinted':
name = 'Preimpresa'
elif self.type == 'online':
name = 'Online'
elif self.type == 'electronic':
name = 'Electronica'
if self.prefix:
name = '%s %s' % (self.prefix, name)
if self.sufix:
name = '%s %s' % (name, self.sufix)
name = '%04d - %s' % (self.number, name)
self.name = name
@api.one
@api.depends('journal_ids.journal_document_class_ids')
def get_journal_document_class_ids(self):
journal_document_class_ids = self.env[
'account.journal.afip_document_class'].search([
('journal_id.point_of_sale_id', '=', self.id)])
self.journal_document_class_ids = journal_document_class_ids
_sql_constraints = [('number_unique', 'unique(number, company_id)',
'Number Must be Unique per Company!'), ]
class afip_document_class(models.Model):
_name = 'afip.document_class'
_description = 'Afip Document Class'
name = fields.Char(
'Name', size=120)
doc_code_prefix = fields.Char(
'Document Code Prefix', help="Prefix for Documents Codes on Invoices \
and Account Moves. For eg. 'FA ' will build 'FA 0001-0000001' Document Number")
afip_code = fields.Integer(
'AFIP Code', required=True)
document_letter_id = fields.Many2one(
'afip.document_letter', 'Document Letter')
report_name = fields.Char(
'Name on Reports',
help='Name that will be printed in reports, for example "CREDIT NOTE"')
document_type = fields.Selection([
('invoice', 'Invoices'),
('credit_note', 'Credit Notes'),
('debit_note', 'Debit Notes'),
('receipt', 'Receipt'),
('ticket', 'Ticket'),
('in_document', 'In Document'),
('other_document', 'Other Documents')
],
string='Document Type',
help='It defines some behaviours on automatic journal selection and\
in menus where it is shown.')
active = fields.Boolean(
'Active', default=True)
class afip_document_letter(models.Model):
_name = 'afip.document_letter'
_description = 'Afip Document letter'
name = fields.Char(
'Name', size=64, required=True)
afip_document_class_ids = fields.One2many(
'afip.document_class', 'document_letter_id', 'Afip Document Classes')
issuer_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_issuer_rel',
'letter_id', 'responsability_id', 'Issuers',)
receptor_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_receptor_rel',
'letter_id', 'responsability_id', 'Receptors',)
active = fields.Boolean(
'Active', default=True)
vat_discriminated = fields.Boolean(
'Vat Discriminated on Invoices?',
help="If True, the vat will be discriminated on invoice report.")
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'), ]
class afip_responsability(models.Model):
_name = 'afip.responsability'
_description = 'AFIP VAT Responsability'
name = fields.Char(
'Name', size=64, required=True)
code = fields.Char(
'Code', size=8, required=True)
active = fields.Boolean(
'Active', default=True)
issued_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_issuer_rel',
'responsability_id', 'letter_id', 'Issued Document Letters')
received_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_receptor_rel',
'responsability_id', 'letter_id', 'Received Document Letters')
vat_tax_required_on_sales_invoices = fields.Boolean(
'VAT Tax Required on Sales Invoices?',
        help='If True, then a VAT tax is mandatory on each sale invoice for companies of this responsibility',
)
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'),
('code', 'unique(code)', 'Code must be unique!')]
class afip_document_type(models.Model):
_name = 'afip.document_type'
_description = 'AFIP document types'<|fim▁hole|> name = fields.Char(
'Name', size=120, required=True)
code = fields.Char(
'Code', size=16, required=True)
afip_code = fields.Integer(
'AFIP Code', required=True)
active = fields.Boolean(
'Active', default=True)<|fim▁end|> | |
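The get_name compute defined on afip.point_of_sale above reduces to a small formatting rule. The standalone sketch below mirrors that rule outside the Odoo ORM (field access and translation of the type label are simplified; it is illustrative, not the model method itself).
def point_of_sale_name(number, type_label, prefix=None, sufix=None):
    # Mirrors get_name: optional prefix/sufix wrap the type label,
    # then the point-of-sale number is zero-padded to four digits.
    name = type_label
    if prefix:
        name = '%s %s' % (prefix, name)
    if sufix:
        name = '%s %s' % (name, sufix)
    return '%04d - %s' % (number, name)

print(point_of_sale_name(3, 'Preimpresa', prefix='Suc. Centro'))  # 0003 - Suc. Centro Preimpresa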
<|file_name|>afip.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, models, api, _
from openerp.exceptions import Warning
import logging
_logger = logging.getLogger(__name__)
class afip_incoterm(models.Model):
<|fim_middle|>
class afip_point_of_sale(models.Model):
_name = 'afip.point_of_sale'
_description = 'Afip Point Of Sale'
prefix = fields.Char(
'Prefix'
)
sufix = fields.Char(
'Sufix'
)
type = fields.Selection([
('manual', 'Manual'),
('preprinted', 'Preprinted'),
('online', 'Online'),
        # Added by another module
# ('electronic', 'Electronic'),
# ('fiscal_printer', 'Fiscal Printer'),
],
'Type',
default='manual',
required=True,
)
name = fields.Char(
compute='get_name',
)
number = fields.Integer(
'Number', required=True
)
company_id = fields.Many2one(
'res.company', 'Company', required=True,
default=lambda self: self.env['res.company']._company_default_get(
'afip.point_of_sale')
)
journal_ids = fields.One2many(
'account.journal',
'point_of_sale_id',
'Journals',
)
document_sequence_type = fields.Selection(
[('own_sequence', 'Own Sequence'),
('same_sequence', 'Same Invoice Sequence')],
string='Document Sequence Type',
default='own_sequence',
required=True,
help="Use own sequence or invoice sequence on Debit and Credit Notes?"
)
journal_document_class_ids = fields.One2many(
'account.journal.afip_document_class',
compute='get_journal_document_class_ids',
string='Documents Classes',
)
@api.one
@api.depends('type', 'sufix', 'prefix', 'number')
def get_name(self):
        # TODO: improve this so it takes the translated label from the selection
if self.type == 'manual':
name = 'Manual'
elif self.type == 'preprinted':
name = 'Preimpresa'
elif self.type == 'online':
name = 'Online'
elif self.type == 'electronic':
name = 'Electronica'
if self.prefix:
name = '%s %s' % (self.prefix, name)
if self.sufix:
name = '%s %s' % (name, self.sufix)
name = '%04d - %s' % (self.number, name)
self.name = name
@api.one
@api.depends('journal_ids.journal_document_class_ids')
def get_journal_document_class_ids(self):
journal_document_class_ids = self.env[
'account.journal.afip_document_class'].search([
('journal_id.point_of_sale_id', '=', self.id)])
self.journal_document_class_ids = journal_document_class_ids
_sql_constraints = [('number_unique', 'unique(number, company_id)',
'Number Must be Unique per Company!'), ]
class afip_document_class(models.Model):
_name = 'afip.document_class'
_description = 'Afip Document Class'
name = fields.Char(
'Name', size=120)
doc_code_prefix = fields.Char(
'Document Code Prefix', help="Prefix for Documents Codes on Invoices \
and Account Moves. For eg. 'FA ' will build 'FA 0001-0000001' Document Number")
afip_code = fields.Integer(
'AFIP Code', required=True)
document_letter_id = fields.Many2one(
'afip.document_letter', 'Document Letter')
report_name = fields.Char(
'Name on Reports',
help='Name that will be printed in reports, for example "CREDIT NOTE"')
document_type = fields.Selection([
('invoice', 'Invoices'),
('credit_note', 'Credit Notes'),
('debit_note', 'Debit Notes'),
('receipt', 'Receipt'),
('ticket', 'Ticket'),
('in_document', 'In Document'),
('other_document', 'Other Documents')
],
string='Document Type',
help='It defines some behaviours on automatic journal selection and\
in menus where it is shown.')
active = fields.Boolean(
'Active', default=True)
class afip_document_letter(models.Model):
_name = 'afip.document_letter'
_description = 'Afip Document letter'
name = fields.Char(
'Name', size=64, required=True)
afip_document_class_ids = fields.One2many(
'afip.document_class', 'document_letter_id', 'Afip Document Classes')
issuer_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_issuer_rel',
'letter_id', 'responsability_id', 'Issuers',)
receptor_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_receptor_rel',
'letter_id', 'responsability_id', 'Receptors',)
active = fields.Boolean(
'Active', default=True)
vat_discriminated = fields.Boolean(
'Vat Discriminated on Invoices?',
help="If True, the vat will be discriminated on invoice report.")
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'), ]
class afip_responsability(models.Model):
_name = 'afip.responsability'
_description = 'AFIP VAT Responsability'
name = fields.Char(
'Name', size=64, required=True)
code = fields.Char(
'Code', size=8, required=True)
active = fields.Boolean(
'Active', default=True)
issued_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_issuer_rel',
'responsability_id', 'letter_id', 'Issued Document Letters')
received_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_receptor_rel',
'responsability_id', 'letter_id', 'Received Document Letters')
vat_tax_required_on_sales_invoices = fields.Boolean(
'VAT Tax Required on Sales Invoices?',
        help='If True, then a VAT tax is mandatory on each sale invoice for companies of this responsibility',
)
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'),
('code', 'unique(code)', 'Code must be unique!')]
class afip_document_type(models.Model):
_name = 'afip.document_type'
_description = 'AFIP document types'
name = fields.Char(
'Name', size=120, required=True)
code = fields.Char(
'Code', size=16, required=True)
afip_code = fields.Integer(
'AFIP Code', required=True)
active = fields.Boolean(
'Active', default=True)
<|fim▁end|> | _name = 'afip.incoterm'
_description = 'Afip Incoterm'
afip_code = fields.Char(
'Code', required=True)
name = fields.Char(
'Name', required=True) |
<|file_name|>afip.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, models, api, _
from openerp.exceptions import Warning
import logging
_logger = logging.getLogger(__name__)
class afip_incoterm(models.Model):
_name = 'afip.incoterm'
_description = 'Afip Incoterm'
afip_code = fields.Char(
'Code', required=True)
name = fields.Char(
'Name', required=True)
class afip_point_of_sale(models.Model):
<|fim_middle|>
class afip_document_class(models.Model):
_name = 'afip.document_class'
_description = 'Afip Document Class'
name = fields.Char(
'Name', size=120)
doc_code_prefix = fields.Char(
'Document Code Prefix', help="Prefix for Document Codes on Invoices \
and Account Moves. E.g. 'FA ' will build Document Number 'FA 0001-0000001'")
afip_code = fields.Integer(
'AFIP Code', required=True)
document_letter_id = fields.Many2one(
'afip.document_letter', 'Document Letter')
report_name = fields.Char(
'Name on Reports',
help='Name that will be printed in reports, for example "CREDIT NOTE"')
document_type = fields.Selection([
('invoice', 'Invoices'),
('credit_note', 'Credit Notes'),
('debit_note', 'Debit Notes'),
('receipt', 'Receipt'),
('ticket', 'Ticket'),
('in_document', 'In Document'),
('other_document', 'Other Documents')
],
string='Document Type',
help='It defines some behaviours on automatic journal selection and\
in menus where it is shown.')
active = fields.Boolean(
'Active', default=True)
class afip_document_letter(models.Model):
_name = 'afip.document_letter'
_description = 'Afip Document Letter'
name = fields.Char(
'Name', size=64, required=True)
afip_document_class_ids = fields.One2many(
'afip.document_class', 'document_letter_id', 'Afip Document Classes')
issuer_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_issuer_rel',
'letter_id', 'responsability_id', 'Issuers',)
receptor_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_receptor_rel',
'letter_id', 'responsability_id', 'Receptors',)
active = fields.Boolean(
'Active', default=True)
vat_discriminated = fields.Boolean(
'Vat Discriminated on Invoices?',
help="If True, the vat will be discriminated on invoice report.")
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'), ]
class afip_responsability(models.Model):
_name = 'afip.responsability'
_description = 'AFIP VAT Responsibility'
name = fields.Char(
'Name', size=64, required=True)
code = fields.Char(
'Code', size=8, required=True)
active = fields.Boolean(
'Active', default=True)
issued_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_issuer_rel',
'responsability_id', 'letter_id', 'Issued Document Letters')
received_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_receptor_rel',
'responsability_id', 'letter_id', 'Received Document Letters')
vat_tax_required_on_sales_invoices = fields.Boolean(
'VAT Tax Required on Sales Invoices?',
help='If True, then a VAT tax is mandatory on each sale invoice for companies of this responsibility',
)
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'),
('code', 'unique(code)', 'Code must be unique!')]
class afip_document_type(models.Model):
_name = 'afip.document_type'
_description = 'AFIP document types'
name = fields.Char(
'Name', size=120, required=True)
code = fields.Char(
'Code', size=16, required=True)
afip_code = fields.Integer(
'AFIP Code', required=True)
active = fields.Boolean(
'Active', default=True)
<|fim▁end|> | _name = 'afip.point_of_sale'
_description = 'Afip Point Of Sale'
prefix = fields.Char(
'Prefix'
)
sufix = fields.Char(
'Suffix'
)
type = fields.Selection([
('manual', 'Manual'),
('preprinted', 'Preprinted'),
('online', 'Online'),
# Added by another module
# ('electronic', 'Electronic'),
# ('fiscal_printer', 'Fiscal Printer'),
],
'Type',
default='manual',
required=True,
)
name = fields.Char(
compute='get_name',
)
number = fields.Integer(
'Number', required=True
)
company_id = fields.Many2one(
'res.company', 'Company', required=True,
default=lambda self: self.env['res.company']._company_default_get(
'afip.point_of_sale')
)
journal_ids = fields.One2many(
'account.journal',
'point_of_sale_id',
'Journals',
)
document_sequence_type = fields.Selection(
[('own_sequence', 'Own Sequence'),
('same_sequence', 'Same Invoice Sequence')],
string='Document Sequence Type',
default='own_sequence',
required=True,
help="Use own sequence or invoice sequence on Debit and Credit Notes?"
)
journal_document_class_ids = fields.One2many(
'account.journal.afip_document_class',
compute='get_journal_document_class_ids',
string='Documents Classes',
)
@api.one
@api.depends('type', 'sufix', 'prefix', 'number')
def get_name(self):
# TODO: improve this so it takes the translated label from the selection
if self.type == 'manual':
name = 'Manual'
elif self.type == 'preprinted':
name = 'Preimpresa'
elif self.type == 'online':
name = 'Online'
elif self.type == 'electronic':
name = 'Electronica'
if self.prefix:
name = '%s %s' % (self.prefix, name)
if self.sufix:
name = '%s %s' % (name, self.sufix)
name = '%04d - %s' % (self.number, name)
self.name = name
@api.one
@api.depends('journal_ids.journal_document_class_ids')
def get_journal_document_class_ids(self):
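# Gather every journal/document-class mapping whose journal belongs to
# this point of sale.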
journal_document_class_ids = self.env[
'account.journal.afip_document_class'].search([
('journal_id.point_of_sale_id', '=', self.id)])
self.journal_document_class_ids = journal_document_class_ids
_sql_constraints = [('number_unique', 'unique(number, company_id)',
'Number Must be Unique per Company!'), ] |
<|file_name|>afip.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, models, api, _
from openerp.exceptions import Warning
import logging
_logger = logging.getLogger(__name__)
class afip_incoterm(models.Model):
_name = 'afip.incoterm'
_description = 'Afip Incoterm'
afip_code = fields.Char(
'Code', required=True)
name = fields.Char(
'Name', required=True)
class afip_point_of_sale(models.Model):
_name = 'afip.point_of_sale'
_description = 'Afip Point Of Sale'
prefix = fields.Char(
'Prefix'
)
sufix = fields.Char(
'Suffix'
)
type = fields.Selection([
('manual', 'Manual'),
('preprinted', 'Preprinted'),
('online', 'Online'),
# Added by another module
# ('electronic', 'Electronic'),
# ('fiscal_printer', 'Fiscal Printer'),
],
'Type',
default='manual',
required=True,
)
name = fields.Char(
compute='get_name',
)
number = fields.Integer(
'Number', required=True
)
company_id = fields.Many2one(
'res.company', 'Company', required=True,
default=lambda self: self.env['res.company']._company_default_get(
'afip.point_of_sale')
)
journal_ids = fields.One2many(
'account.journal',
'point_of_sale_id',
'Journals',
)
document_sequence_type = fields.Selection(
[('own_sequence', 'Own Sequence'),
('same_sequence', 'Same Invoice Sequence')],
string='Document Sequence Type',
default='own_sequence',
required=True,
help="Use own sequence or invoice sequence on Debit and Credit Notes?"
)
journal_document_class_ids = fields.One2many(
'account.journal.afip_document_class',
compute='get_journal_document_class_ids',
string='Documents Classes',
)
@api.one
@api.depends('type', 'sufix', 'prefix', 'number')
def get_name(self):
# TODO: improve this so it takes the translated label from the selection
<|fim_middle|>
@api.one
@api.depends('journal_ids.journal_document_class_ids')
def get_journal_document_class_ids(self):
journal_document_class_ids = self.env[
'account.journal.afip_document_class'].search([
('journal_id.point_of_sale_id', '=', self.id)])
self.journal_document_class_ids = journal_document_class_ids
_sql_constraints = [('number_unique', 'unique(number, company_id)',
'Number Must be Unique per Company!'), ]
class afip_document_class(models.Model):
_name = 'afip.document_class'
_description = 'Afip Document Class'
name = fields.Char(
'Name', size=120)
doc_code_prefix = fields.Char(
'Document Code Prefix', help="Prefix for Document Codes on Invoices \
and Account Moves. E.g. 'FA ' will build Document Number 'FA 0001-0000001'")
afip_code = fields.Integer(
'AFIP Code', required=True)
document_letter_id = fields.Many2one(
'afip.document_letter', 'Document Letter')
report_name = fields.Char(
'Name on Reports',
help='Name that will be printed in reports, for example "CREDIT NOTE"')
document_type = fields.Selection([
('invoice', 'Invoices'),
('credit_note', 'Credit Notes'),
('debit_note', 'Debit Notes'),
('receipt', 'Receipt'),
('ticket', 'Ticket'),
('in_document', 'In Document'),
('other_document', 'Other Documents')
],
string='Document Type',
help='It defines some behaviours on automatic journal selection and\
in menus where it is shown.')
active = fields.Boolean(
'Active', default=True)
class afip_document_letter(models.Model):
_name = 'afip.document_letter'
_description = 'Afip Document Letter'
name = fields.Char(
'Name', size=64, required=True)
afip_document_class_ids = fields.One2many(
'afip.document_class', 'document_letter_id', 'Afip Document Classes')
issuer_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_issuer_rel',
'letter_id', 'responsability_id', 'Issuers',)
receptor_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_receptor_rel',
'letter_id', 'responsability_id', 'Receptors',)
active = fields.Boolean(
'Active', default=True)
vat_discriminated = fields.Boolean(
'Vat Discriminated on Invoices?',
help="If True, the vat will be discriminated on invoice report.")
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'), ]
class afip_responsability(models.Model):
_name = 'afip.responsability'
_description = 'AFIP VAT Responsibility'
name = fields.Char(
'Name', size=64, required=True)
code = fields.Char(
'Code', size=8, required=True)
active = fields.Boolean(
'Active', default=True)
issued_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_issuer_rel',
'responsability_id', 'letter_id', 'Issued Document Letters')
received_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_receptor_rel',
'responsability_id', 'letter_id', 'Received Document Letters')
vat_tax_required_on_sales_invoices = fields.Boolean(
'VAT Tax Required on Sales Invoices?',
help='If True, then a VAT tax is mandatory on each sale invoice for companies of this responsibility',
)
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'),
('code', 'unique(code)', 'Code must be unique!')]
class afip_document_type(models.Model):
_name = 'afip.document_type'
_description = 'AFIP document types'
name = fields.Char(
'Name', size=120, required=True)
code = fields.Char(
'Code', size=16, required=True)
afip_code = fields.Integer(
'AFIP Code', required=True)
active = fields.Boolean(
'Active', default=True)
<|fim▁end|> | if self.type == 'manual':
name = 'Manual'
elif self.type == 'preprinted':
name = 'Preimpresa'
elif self.type == 'online':
name = 'Online'
elif self.type == 'electronic':
name = 'Electronica'
if self.prefix:
name = '%s %s' % (self.prefix, name)
if self.sufix:
name = '%s %s' % (name, self.sufix)
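# Final form is the zero-padded POS number plus the label, e.g. '0004 - Preimpresa'.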
name = '%04d - %s' % (self.number, name)
self.name = name |
<|file_name|>afip.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, models, api, _
from openerp.exceptions import Warning
import logging
_logger = logging.getLogger(__name__)
class afip_incoterm(models.Model):
_name = 'afip.incoterm'
_description = 'Afip Incoterm'
afip_code = fields.Char(
'Code', required=True)
name = fields.Char(
'Name', required=True)
class afip_point_of_sale(models.Model):
_name = 'afip.point_of_sale'
_description = 'Afip Point Of Sale'
prefix = fields.Char(
'Prefix'
)
sufix = fields.Char(
'Suffix'
)
type = fields.Selection([
('manual', 'Manual'),
('preprinted', 'Preprinted'),
('online', 'Online'),
# Added by another module
# ('electronic', 'Electronic'),
# ('fiscal_printer', 'Fiscal Printer'),
],
'Type',
default='manual',
required=True,
)
name = fields.Char(
compute='get_name',
)
number = fields.Integer(
'Number', required=True
)
company_id = fields.Many2one(
'res.company', 'Company', required=True,
default=lambda self: self.env['res.company']._company_default_get(
'afip.point_of_sale')
)
journal_ids = fields.One2many(
'account.journal',
'point_of_sale_id',
'Journals',
)
document_sequence_type = fields.Selection(
[('own_sequence', 'Own Sequence'),
('same_sequence', 'Same Invoice Sequence')],
string='Document Sequence Type',
default='own_sequence',
required=True,
help="Use own sequence or invoice sequence on Debit and Credit Notes?"
)
journal_document_class_ids = fields.One2many(
'account.journal.afip_document_class',
compute='get_journal_document_class_ids',
string='Documents Classes',
)
@api.one
@api.depends('type', 'sufix', 'prefix', 'number')
def get_name(self):
# TODO: improve this so it takes the translated label from the selection
if self.type == 'manual':
name = 'Manual'
elif self.type == 'preprinted':
name = 'Preimpresa'
elif self.type == 'online':
name = 'Online'
elif self.type == 'electronic':
name = 'Electronica'
if self.prefix:
name = '%s %s' % (self.prefix, name)
if self.sufix:
name = '%s %s' % (name, self.sufix)
name = '%04d - %s' % (self.number, name)
self.name = name
@api.one
@api.depends('journal_ids.journal_document_class_ids')
def get_journal_document_class_ids(self):
<|fim_middle|>
_sql_constraints = [('number_unique', 'unique(number, company_id)',
'Number Must be Unique per Company!'), ]
class afip_document_class(models.Model):
_name = 'afip.document_class'
_description = 'Afip Document Class'
name = fields.Char(
'Name', size=120)
doc_code_prefix = fields.Char(
'Document Code Prefix', help="Prefix for Document Codes on Invoices \
and Account Moves. E.g. 'FA ' will build Document Number 'FA 0001-0000001'")
afip_code = fields.Integer(
'AFIP Code', required=True)
document_letter_id = fields.Many2one(
'afip.document_letter', 'Document Letter')
report_name = fields.Char(
'Name on Reports',
help='Name that will be printed in reports, for example "CREDIT NOTE"')
document_type = fields.Selection([
('invoice', 'Invoices'),
('credit_note', 'Credit Notes'),
('debit_note', 'Debit Notes'),
('receipt', 'Receipt'),
('ticket', 'Ticket'),
('in_document', 'In Document'),
('other_document', 'Other Documents')
],
string='Document Type',
help='It defines some behaviours on automatic journal selection and\
in menus where it is shown.')
active = fields.Boolean(
'Active', default=True)
class afip_document_letter(models.Model):
_name = 'afip.document_letter'
_description = 'Afip Document Letter'
name = fields.Char(
'Name', size=64, required=True)
afip_document_class_ids = fields.One2many(
'afip.document_class', 'document_letter_id', 'Afip Document Classes')
issuer_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_issuer_rel',
'letter_id', 'responsability_id', 'Issuers',)
receptor_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_receptor_rel',
'letter_id', 'responsability_id', 'Receptors',)
active = fields.Boolean(
'Active', default=True)
vat_discriminated = fields.Boolean(
'Vat Discriminated on Invoices?',
help="If True, the vat will be discriminated on invoice report.")
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'), ]
class afip_responsability(models.Model):
_name = 'afip.responsability'
_description = 'AFIP VAT Responsibility'
name = fields.Char(
'Name', size=64, required=True)
code = fields.Char(
'Code', size=8, required=True)
active = fields.Boolean(
'Active', default=True)
issued_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_issuer_rel',
'responsability_id', 'letter_id', 'Issued Document Letters')
received_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_receptor_rel',
'responsability_id', 'letter_id', 'Received Document Letters')
vat_tax_required_on_sales_invoices = fields.Boolean(
'VAT Tax Required on Sales Invoices?',
help='If True, then a VAT tax is mandatory on each sale invoice for companies of this responsibility',
)
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'),
('code', 'unique(code)', 'Code must be unique!')]
class afip_document_type(models.Model):
_name = 'afip.document_type'
_description = 'AFIP document types'
name = fields.Char(
'Name', size=120, required=True)
code = fields.Char(
'Code', size=16, required=True)
afip_code = fields.Integer(
'AFIP Code', required=True)
active = fields.Boolean(
'Active', default=True)
<|fim▁end|> | journal_document_class_ids = self.env[
'account.journal.afip_document_class'].search([
('journal_id.point_of_sale_id', '=', self.id)])
self.journal_document_class_ids = journal_document_class_ids |
<|file_name|>afip.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, models, api, _
from openerp.exceptions import Warning
import logging
_logger = logging.getLogger(__name__)
class afip_incoterm(models.Model):
_name = 'afip.incoterm'
_description = 'Afip Incoterm'
afip_code = fields.Char(
'Code', required=True)
name = fields.Char(
'Name', required=True)
class afip_point_of_sale(models.Model):
_name = 'afip.point_of_sale'
_description = 'Afip Point Of Sale'
prefix = fields.Char(
'Prefix'
)
sufix = fields.Char(
'Suffix'
)
type = fields.Selection([
('manual', 'Manual'),
('preprinted', 'Preprinted'),
('online', 'Online'),
# Added by another module
# ('electronic', 'Electronic'),
# ('fiscal_printer', 'Fiscal Printer'),
],
'Type',
default='manual',
required=True,
)
name = fields.Char(
compute='get_name',
)
number = fields.Integer(
'Number', required=True
)
company_id = fields.Many2one(
'res.company', 'Company', required=True,
default=lambda self: self.env['res.company']._company_default_get(
'afip.point_of_sale')
)
journal_ids = fields.One2many(
'account.journal',
'point_of_sale_id',
'Journals',
)
document_sequence_type = fields.Selection(
[('own_sequence', 'Own Sequence'),
('same_sequence', 'Same Invoice Sequence')],
string='Document Sequence Type',
default='own_sequence',
required=True,
help="Use own sequence or invoice sequence on Debit and Credit Notes?"
)
journal_document_class_ids = fields.One2many(
'account.journal.afip_document_class',
compute='get_journal_document_class_ids',
string='Documents Classes',
)
@api.one
@api.depends('type', 'sufix', 'prefix', 'number')
def get_name(self):
# TODO: improve this so it takes the translated label from the selection
if self.type == 'manual':
name = 'Manual'
elif self.type == 'preprinted':
name = 'Preimpresa'
elif self.type == 'online':
name = 'Online'
elif self.type == 'electronic':
name = 'Electronica'
if self.prefix:
name = '%s %s' % (self.prefix, name)
if self.sufix:
name = '%s %s' % (name, self.sufix)
name = '%04d - %s' % (self.number, name)
self.name = name
@api.one
@api.depends('journal_ids.journal_document_class_ids')
def get_journal_document_class_ids(self):
journal_document_class_ids = self.env[
'account.journal.afip_document_class'].search([
('journal_id.point_of_sale_id', '=', self.id)])
self.journal_document_class_ids = journal_document_class_ids
_sql_constraints = [('number_unique', 'unique(number, company_id)',
'Number Must be Unique per Company!'), ]
class afip_document_class(models.Model):
<|fim_middle|>
class afip_document_letter(models.Model):
_name = 'afip.document_letter'
_description = 'Afip Document Letter'
name = fields.Char(
'Name', size=64, required=True)
afip_document_class_ids = fields.One2many(
'afip.document_class', 'document_letter_id', 'Afip Document Classes')
issuer_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_issuer_rel',
'letter_id', 'responsability_id', 'Issuers',)
receptor_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_receptor_rel',
'letter_id', 'responsability_id', 'Receptors',)
active = fields.Boolean(
'Active', default=True)
vat_discriminated = fields.Boolean(
'Vat Discriminated on Invoices?',
help="If True, the vat will be discriminated on invoice report.")
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'), ]
class afip_responsability(models.Model):
_name = 'afip.responsability'
_description = 'AFIP VAT Responsibility'
name = fields.Char(
'Name', size=64, required=True)
code = fields.Char(
'Code', size=8, required=True)
active = fields.Boolean(
'Active', default=True)
issued_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_issuer_rel',
'responsability_id', 'letter_id', 'Issued Document Letters')
received_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_receptor_rel',
'responsability_id', 'letter_id', 'Received Document Letters')
vat_tax_required_on_sales_invoices = fields.Boolean(
'VAT Tax Required on Sales Invoices?',
help='If True, then a VAT tax is mandatory on each sale invoice for companies of this responsibility',
)
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'),
('code', 'unique(code)', 'Code must be unique!')]
class afip_document_type(models.Model):
_name = 'afip.document_type'
_description = 'AFIP document types'
name = fields.Char(
'Name', size=120, required=True)
code = fields.Char(
'Code', size=16, required=True)
afip_code = fields.Integer(
'AFIP Code', required=True)
active = fields.Boolean(
'Active', default=True)
<|fim▁end|> | _name = 'afip.document_class'
_description = 'Afip Document Class'
name = fields.Char(
'Name', size=120)
doc_code_prefix = fields.Char(
'Document Code Prefix', help="Prefix for Document Codes on Invoices \
and Account Moves. E.g. 'FA ' will build Document Number 'FA 0001-0000001'")
afip_code = fields.Integer(
'AFIP Code', required=True)
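# Numeric voucher-type code assigned by AFIP, e.g. 1 for 'Factura A'.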
document_letter_id = fields.Many2one(
'afip.document_letter', 'Document Letter')
report_name = fields.Char(
'Name on Reports',
help='Name that will be printed in reports, for example "CREDIT NOTE"')
document_type = fields.Selection([
('invoice', 'Invoices'),
('credit_note', 'Credit Notes'),
('debit_note', 'Debit Notes'),
('receipt', 'Receipt'),
('ticket', 'Ticket'),
('in_document', 'In Document'),
('other_document', 'Other Documents')
],
string='Document Type',
help='It defines some behaviours on automatic journal selection and\
in menus where it is shown.')
active = fields.Boolean(
'Active', default=True) |
<|file_name|>afip.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, models, api, _
from openerp.exceptions import Warning
import logging
_logger = logging.getLogger(__name__)
class afip_incoterm(models.Model):
_name = 'afip.incoterm'
_description = 'Afip Incoterm'
afip_code = fields.Char(
'Code', required=True)
name = fields.Char(
'Name', required=True)
class afip_point_of_sale(models.Model):
_name = 'afip.point_of_sale'
_description = 'Afip Point Of Sale'
prefix = fields.Char(
'Prefix'
)
sufix = fields.Char(
'Suffix'
)
type = fields.Selection([
('manual', 'Manual'),
('preprinted', 'Preprinted'),
('online', 'Online'),
# Added by another module
# ('electronic', 'Electronic'),
# ('fiscal_printer', 'Fiscal Printer'),
],
'Type',
default='manual',
required=True,
)
name = fields.Char(
compute='get_name',
)
number = fields.Integer(
'Number', required=True
)
company_id = fields.Many2one(
'res.company', 'Company', required=True,
default=lambda self: self.env['res.company']._company_default_get(
'afip.point_of_sale')
)
journal_ids = fields.One2many(
'account.journal',
'point_of_sale_id',
'Journals',
)
document_sequence_type = fields.Selection(
[('own_sequence', 'Own Sequence'),
('same_sequence', 'Same Invoice Sequence')],
string='Document Sequence Type',
default='own_sequence',
required=True,
help="Use own sequence or invoice sequence on Debit and Credit Notes?"
)
journal_document_class_ids = fields.One2many(
'account.journal.afip_document_class',
compute='get_journal_document_class_ids',
string='Documents Classes',
)
@api.one
@api.depends('type', 'sufix', 'prefix', 'number')
def get_name(self):
# TODO: improve this so it takes the translated label from the selection
if self.type == 'manual':
name = 'Manual'
elif self.type == 'preprinted':
name = 'Preimpresa'
elif self.type == 'online':
name = 'Online'
elif self.type == 'electronic':
name = 'Electronica'
if self.prefix:
name = '%s %s' % (self.prefix, name)
if self.sufix:
name = '%s %s' % (name, self.sufix)
name = '%04d - %s' % (self.number, name)
self.name = name
@api.one
@api.depends('journal_ids.journal_document_class_ids')
def get_journal_document_class_ids(self):
journal_document_class_ids = self.env[
'account.journal.afip_document_class'].search([
('journal_id.point_of_sale_id', '=', self.id)])
self.journal_document_class_ids = journal_document_class_ids
_sql_constraints = [('number_unique', 'unique(number, company_id)',
'Number Must be Unique per Company!'), ]
class afip_document_class(models.Model):
_name = 'afip.document_class'
_description = 'Afip Document Class'
name = fields.Char(
'Name', size=120)
doc_code_prefix = fields.Char(
'Document Code Prefix', help="Prefix for Document Codes on Invoices \
and Account Moves. E.g. 'FA ' will build Document Number 'FA 0001-0000001'")
afip_code = fields.Integer(
'AFIP Code', required=True)
document_letter_id = fields.Many2one(
'afip.document_letter', 'Document Letter')
report_name = fields.Char(
'Name on Reports',
help='Name that will be printed in reports, for example "CREDIT NOTE"')
document_type = fields.Selection([
('invoice', 'Invoices'),
('credit_note', 'Credit Notes'),
('debit_note', 'Debit Notes'),
('receipt', 'Receipt'),
('ticket', 'Ticket'),
('in_document', 'In Document'),
('other_document', 'Other Documents')
],
string='Document Type',
help='It defines some behaviours on automatic journal selection and\
in menus where it is shown.')
active = fields.Boolean(
'Active', default=True)
class afip_document_letter(models.Model):
<|fim_middle|>
class afip_responsability(models.Model):
_name = 'afip.responsability'
_description = 'AFIP VAT Responsibility'
name = fields.Char(
'Name', size=64, required=True)
code = fields.Char(
'Code', size=8, required=True)
active = fields.Boolean(
'Active', default=True)
issued_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_issuer_rel',
'responsability_id', 'letter_id', 'Issued Document Letters')
received_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_receptor_rel',
'responsability_id', 'letter_id', 'Received Document Letters')
vat_tax_required_on_sales_invoices = fields.Boolean(
'VAT Tax Required on Sales Invoices?',
help='If True, then a VAT tax is mandatory on each sale invoice for companies of this responsibility',
)
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'),
('code', 'unique(code)', 'Code must be unique!')]
class afip_document_type(models.Model):
_name = 'afip.document_type'
_description = 'AFIP document types'
name = fields.Char(
'Name', size=120, required=True)
code = fields.Char(
'Code', size=16, required=True)
afip_code = fields.Integer(
'AFIP Code', required=True)
active = fields.Boolean(
'Active', default=True)
<|fim▁end|> | _name = 'afip.document_letter'
_description = 'Afip Document Letter'
name = fields.Char(
'Name', size=64, required=True)
afip_document_class_ids = fields.One2many(
'afip.document_class', 'document_letter_id', 'Afip Document Classes')
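# AFIP responsabilities allowed to issue or receive documents with this letter.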
issuer_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_issuer_rel',
'letter_id', 'responsability_id', 'Issuers',)
receptor_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_receptor_rel',
'letter_id', 'responsability_id', 'Receptors',)
active = fields.Boolean(
'Active', default=True)
vat_discriminated = fields.Boolean(
'Vat Discriminated on Invoices?',
help="If True, the vat will be discriminated on invoice report.")
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'), ] |
<|file_name|>afip.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, models, api, _
from openerp.exceptions import Warning
import logging
_logger = logging.getLogger(__name__)
class afip_incoterm(models.Model):
_name = 'afip.incoterm'
_description = 'Afip Incoterm'
afip_code = fields.Char(
'Code', required=True)
name = fields.Char(
'Name', required=True)
class afip_point_of_sale(models.Model):
_name = 'afip.point_of_sale'
_description = 'Afip Point Of Sale'
prefix = fields.Char(
'Prefix'
)
sufix = fields.Char(
'Suffix'
)
type = fields.Selection([
('manual', 'Manual'),
('preprinted', 'Preprinted'),
('online', 'Online'),
# Added by another module
# ('electronic', 'Electronic'),
# ('fiscal_printer', 'Fiscal Printer'),
],
'Type',
default='manual',
required=True,
)
name = fields.Char(
compute='get_name',
)
number = fields.Integer(
'Number', required=True
)
company_id = fields.Many2one(
'res.company', 'Company', required=True,
default=lambda self: self.env['res.company']._company_default_get(
'afip.point_of_sale')
)
journal_ids = fields.One2many(
'account.journal',
'point_of_sale_id',
'Journals',
)
document_sequence_type = fields.Selection(
[('own_sequence', 'Own Sequence'),
('same_sequence', 'Same Invoice Sequence')],
string='Document Sequence Type',
default='own_sequence',
required=True,
help="Use own sequence or invoice sequence on Debit and Credit Notes?"
)
journal_document_class_ids = fields.One2many(
'account.journal.afip_document_class',
compute='get_journal_document_class_ids',
string='Documents Classes',
)
@api.one
@api.depends('type', 'sufix', 'prefix', 'number')
def get_name(self):
# TODO: improve this so it takes the translated label from the selection
if self.type == 'manual':
name = 'Manual'
elif self.type == 'preprinted':
name = 'Preimpresa'
elif self.type == 'online':
name = 'Online'
elif self.type == 'electronic':
name = 'Electronica'
if self.prefix:
name = '%s %s' % (self.prefix, name)
if self.sufix:
name = '%s %s' % (name, self.sufix)
name = '%04d - %s' % (self.number, name)
self.name = name
@api.one
@api.depends('journal_ids.journal_document_class_ids')
def get_journal_document_class_ids(self):
journal_document_class_ids = self.env[
'account.journal.afip_document_class'].search([
('journal_id.point_of_sale_id', '=', self.id)])
self.journal_document_class_ids = journal_document_class_ids
_sql_constraints = [('number_unique', 'unique(number, company_id)',
'Number Must be Unique per Company!'), ]
class afip_document_class(models.Model):
_name = 'afip.document_class'
_description = 'Afip Document Class'
name = fields.Char(
'Name', size=120)
doc_code_prefix = fields.Char(
'Document Code Prefix', help="Prefix for Document Codes on Invoices \
and Account Moves. E.g. 'FA ' will build Document Number 'FA 0001-0000001'")
afip_code = fields.Integer(
'AFIP Code', required=True)
document_letter_id = fields.Many2one(
'afip.document_letter', 'Document Letter')
report_name = fields.Char(
'Name on Reports',
help='Name that will be printed in reports, for example "CREDIT NOTE"')
document_type = fields.Selection([
('invoice', 'Invoices'),
('credit_note', 'Credit Notes'),
('debit_note', 'Debit Notes'),
('receipt', 'Receipt'),
('ticket', 'Ticket'),
('in_document', 'In Document'),
('other_document', 'Other Documents')
],
string='Document Type',
help='It defines some behaviours on automatic journal selection and\
in menus where it is shown.')
active = fields.Boolean(
'Active', default=True)
class afip_document_letter(models.Model):
_name = 'afip.document_letter'
_description = 'Afip Document Letter'
name = fields.Char(
'Name', size=64, required=True)
afip_document_class_ids = fields.One2many(
'afip.document_class', 'document_letter_id', 'Afip Document Classes')
issuer_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_issuer_rel',
'letter_id', 'responsability_id', 'Issuers',)
receptor_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_receptor_rel',
'letter_id', 'responsability_id', 'Receptors',)
active = fields.Boolean(
'Active', default=True)
vat_discriminated = fields.Boolean(
'Vat Discriminated on Invoices?',
help="If True, the vat will be discriminated on invoice report.")
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'), ]
class afip_responsability(models.Model):
<|fim_middle|>
class afip_document_type(models.Model):
_name = 'afip.document_type'
_description = 'AFIP document types'
name = fields.Char(
'Name', size=120, required=True)
code = fields.Char(
'Code', size=16, required=True)
afip_code = fields.Integer(
'AFIP Code', required=True)
active = fields.Boolean(
'Active', default=True)
<|fim▁end|> | _name = 'afip.responsability'
_description = 'AFIP VAT Responsibility'
name = fields.Char(
'Name', size=64, required=True)
code = fields.Char(
'Code', size=8, required=True)
active = fields.Boolean(
'Active', default=True)
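# Inverse side of the issuer/receptor relations declared on afip.document_letter.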
issued_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_issuer_rel',
'responsability_id', 'letter_id', 'Issued Document Letters')
received_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_receptor_rel',
'responsability_id', 'letter_id', 'Received Document Letters')
vat_tax_required_on_sales_invoices = fields.Boolean(
'VAT Tax Required on Sales Invoices?',
help='If True, then a VAT tax is mandatory on each sale invoice for companies of this responsibility',
)
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'),
('code', 'unique(code)', 'Code must be unique!')] |
<|file_name|>afip.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, models, api, _
from openerp.exceptions import Warning
import logging
_logger = logging.getLogger(__name__)
class afip_incoterm(models.Model):
_name = 'afip.incoterm'
_description = 'Afip Incoterm'
afip_code = fields.Char(
'Code', required=True)
name = fields.Char(
'Name', required=True)
class afip_point_of_sale(models.Model):
_name = 'afip.point_of_sale'
_description = 'Afip Point Of Sale'
prefix = fields.Char(
'Prefix'
)
sufix = fields.Char(
'Suffix'
)
type = fields.Selection([
('manual', 'Manual'),
('preprinted', 'Preprinted'),
('online', 'Online'),
# Added by another module
# ('electronic', 'Electronic'),
# ('fiscal_printer', 'Fiscal Printer'),
],
'Type',
default='manual',
required=True,
)
name = fields.Char(
compute='get_name',
)
number = fields.Integer(
'Number', required=True
)
company_id = fields.Many2one(
'res.company', 'Company', required=True,
default=lambda self: self.env['res.company']._company_default_get(
'afip.point_of_sale')
)
journal_ids = fields.One2many(
'account.journal',
'point_of_sale_id',
'Journals',
)
document_sequence_type = fields.Selection(
[('own_sequence', 'Own Sequence'),
('same_sequence', 'Same Invoice Sequence')],
string='Document Sequence Type',
default='own_sequence',
required=True,
help="Use own sequence or invoice sequence on Debit and Credit Notes?"
)
journal_document_class_ids = fields.One2many(
'account.journal.afip_document_class',
compute='get_journal_document_class_ids',
string='Documents Classes',
)
@api.one
@api.depends('type', 'sufix', 'prefix', 'number')
def get_name(self):
# TODO: improve this so it takes the translated label from the selection
if self.type == 'manual':
name = 'Manual'
elif self.type == 'preprinted':
name = 'Preimpresa'
elif self.type == 'online':
name = 'Online'
elif self.type == 'electronic':
name = 'Electronica'
if self.prefix:
name = '%s %s' % (self.prefix, name)
if self.sufix:
name = '%s %s' % (name, self.sufix)
name = '%04d - %s' % (self.number, name)
self.name = name
@api.one
@api.depends('journal_ids.journal_document_class_ids')
def get_journal_document_class_ids(self):
journal_document_class_ids = self.env[
'account.journal.afip_document_class'].search([
('journal_id.point_of_sale_id', '=', self.id)])
self.journal_document_class_ids = journal_document_class_ids
_sql_constraints = [('number_unique', 'unique(number, company_id)',
'Number Must be Unique per Company!'), ]
class afip_document_class(models.Model):
_name = 'afip.document_class'
_description = 'Afip Document Class'
name = fields.Char(
'Name', size=120)
doc_code_prefix = fields.Char(
'Document Code Prefix', help="Prefix for Document Codes on Invoices \
and Account Moves. E.g. 'FA ' will build Document Number 'FA 0001-0000001'")
afip_code = fields.Integer(
'AFIP Code', required=True)
document_letter_id = fields.Many2one(
'afip.document_letter', 'Document Letter')
report_name = fields.Char(
'Name on Reports',
help='Name that will be printed in reports, for example "CREDIT NOTE"')
document_type = fields.Selection([
('invoice', 'Invoices'),
('credit_note', 'Credit Notes'),
('debit_note', 'Debit Notes'),
('receipt', 'Receipt'),
('ticket', 'Ticket'),
('in_document', 'In Document'),
('other_document', 'Other Documents')
],
string='Document Type',
help='It defines some behaviours on automatic journal selection and\
in menus where it is shown.')
active = fields.Boolean(
'Active', default=True)
class afip_document_letter(models.Model):
_name = 'afip.document_letter'
_description = 'Afip Document Letter'
name = fields.Char(
'Name', size=64, required=True)
afip_document_class_ids = fields.One2many(
'afip.document_class', 'document_letter_id', 'Afip Document Classes')
issuer_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_issuer_rel',
'letter_id', 'responsability_id', 'Issuers',)
receptor_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_receptor_rel',
'letter_id', 'responsability_id', 'Receptors',)
active = fields.Boolean(
'Active', default=True)
vat_discriminated = fields.Boolean(
'Vat Discriminated on Invoices?',
help="If True, the vat will be discriminated on invoice report.")
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'), ]
class afip_responsability(models.Model):
_name = 'afip.responsability'
_description = 'AFIP VAT Responsibility'
name = fields.Char(
'Name', size=64, required=True)
code = fields.Char(
'Code', size=8, required=True)
active = fields.Boolean(
'Active', default=True)
issued_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_issuer_rel',
'responsability_id', 'letter_id', 'Issued Document Letters')
received_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_receptor_rel',
'responsability_id', 'letter_id', 'Received Document Letters')
vat_tax_required_on_sales_invoices = fields.Boolean(
'VAT Tax Required on Sales Invoices?',
help='If True, then a VAT tax is mandatory on each sale invoice for companies of this responsibility',
)
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'),
('code', 'unique(code)', 'Code must be unique!')]
class afip_document_type(models.Model):
<|fim_middle|>
<|fim▁end|> | _name = 'afip.document_type'
_description = 'AFIP document types'
name = fields.Char(
'Name', size=120, required=True)
code = fields.Char(
'Code', size=16, required=True)
afip_code = fields.Integer(
'AFIP Code', required=True)
active = fields.Boolean(
'Active', default=True) |
<|file_name|>afip.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, models, api, _
from openerp.exceptions import Warning
import logging
_logger = logging.getLogger(__name__)
class afip_incoterm(models.Model):
_name = 'afip.incoterm'
_description = 'Afip Incoterm'
afip_code = fields.Char(
'Code', required=True)
name = fields.Char(
'Name', required=True)
class afip_point_of_sale(models.Model):
_name = 'afip.point_of_sale'
_description = 'Afip Point Of Sale'
prefix = fields.Char(
'Prefix'
)
sufix = fields.Char(
'Suffix'
)
type = fields.Selection([
('manual', 'Manual'),
('preprinted', 'Preprinted'),
('online', 'Online'),
# Added by another module
# ('electronic', 'Electronic'),
# ('fiscal_printer', 'Fiscal Printer'),
],
'Type',
default='manual',
required=True,
)
name = fields.Char(
compute='get_name',
)
number = fields.Integer(
'Number', required=True
)
company_id = fields.Many2one(
'res.company', 'Company', required=True,
default=lambda self: self.env['res.company']._company_default_get(
'afip.point_of_sale')
)
journal_ids = fields.One2many(
'account.journal',
'point_of_sale_id',
'Journals',
)
document_sequence_type = fields.Selection(
[('own_sequence', 'Own Sequence'),
('same_sequence', 'Same Invoice Sequence')],
string='Document Sequence Type',
default='own_sequence',
required=True,
help="Use own sequence or invoice sequence on Debit and Credit Notes?"
)
journal_document_class_ids = fields.One2many(
'account.journal.afip_document_class',
compute='get_journal_document_class_ids',
string='Documents Classes',
)
@api.one
@api.depends('type', 'sufix', 'prefix', 'number')
def get_name(self):
# TODO: improve this so it takes the translated label from the selection
if self.type == 'manual':
<|fim_middle|>
elif self.type == 'preprinted':
name = 'Preimpresa'
elif self.type == 'online':
name = 'Online'
elif self.type == 'electronic':
name = 'Electronica'
if self.prefix:
name = '%s %s' % (self.prefix, name)
if self.sufix:
name = '%s %s' % (name, self.sufix)
name = '%04d - %s' % (self.number, name)
self.name = name
@api.one
@api.depends('journal_ids.journal_document_class_ids')
def get_journal_document_class_ids(self):
journal_document_class_ids = self.env[
'account.journal.afip_document_class'].search([
('journal_id.point_of_sale_id', '=', self.id)])
self.journal_document_class_ids = journal_document_class_ids
_sql_constraints = [('number_unique', 'unique(number, company_id)',
'Number Must be Unique per Company!'), ]
class afip_document_class(models.Model):
_name = 'afip.document_class'
_description = 'Afip Document Class'
name = fields.Char(
'Name', size=120)
doc_code_prefix = fields.Char(
'Document Code Prefix', help="Prefix for Document Codes on Invoices \
and Account Moves. E.g. 'FA ' will build Document Number 'FA 0001-0000001'")
afip_code = fields.Integer(
'AFIP Code', required=True)
document_letter_id = fields.Many2one(
'afip.document_letter', 'Document Letter')
report_name = fields.Char(
'Name on Reports',
help='Name that will be printed in reports, for example "CREDIT NOTE"')
document_type = fields.Selection([
('invoice', 'Invoices'),
('credit_note', 'Credit Notes'),
('debit_note', 'Debit Notes'),
('receipt', 'Receipt'),
('ticket', 'Ticket'),
('in_document', 'In Document'),
('other_document', 'Other Documents')
],
string='Document Type',
help='It defines some behaviours on automatic journal selection and\
in menus where it is shown.')
active = fields.Boolean(
'Active', default=True)
class afip_document_letter(models.Model):
_name = 'afip.document_letter'
_description = 'Afip Document Letter'
name = fields.Char(
'Name', size=64, required=True)
afip_document_class_ids = fields.One2many(
'afip.document_class', 'document_letter_id', 'Afip Document Classes')
issuer_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_issuer_rel',
'letter_id', 'responsability_id', 'Issuers',)
receptor_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_receptor_rel',
'letter_id', 'responsability_id', 'Receptors',)
active = fields.Boolean(
'Active', default=True)
vat_discriminated = fields.Boolean(
'Vat Discriminated on Invoices?',
help="If True, the vat will be discriminated on invoice report.")
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'), ]
class afip_responsability(models.Model):
_name = 'afip.responsability'
_description = 'AFIP VAT Responsibility'
name = fields.Char(
'Name', size=64, required=True)
code = fields.Char(
'Code', size=8, required=True)
active = fields.Boolean(
'Active', default=True)
issued_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_issuer_rel',
'responsability_id', 'letter_id', 'Issued Document Letters')
received_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_receptor_rel',
'responsability_id', 'letter_id', 'Received Document Letters')
vat_tax_required_on_sales_invoices = fields.Boolean(
'VAT Tax Required on Sales Invoices?',
help='If True, then a VAT tax is mandatory on each sale invoice for companies of this responsibility',
)
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'),
('code', 'unique(code)', 'Code must be unique!')]
class afip_document_type(models.Model):
_name = 'afip.document_type'
_description = 'AFIP document types'
name = fields.Char(
'Name', size=120, required=True)
code = fields.Char(
'Code', size=16, required=True)
afip_code = fields.Integer(
'AFIP Code', required=True)
active = fields.Boolean(
'Active', default=True)
<|fim▁end|> | name = 'Manual' |
<|file_name|>afip.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, models, api, _
from openerp.exceptions import Warning
import logging
_logger = logging.getLogger(__name__)
class afip_incoterm(models.Model):
_name = 'afip.incoterm'
_description = 'Afip Incoterm'
afip_code = fields.Char(
'Code', required=True)
name = fields.Char(
'Name', required=True)
class afip_point_of_sale(models.Model):
_name = 'afip.point_of_sale'
_description = 'Afip Point Of Sale'
prefix = fields.Char(
'Prefix'
)
sufix = fields.Char(
'Suffix'
)
type = fields.Selection([
('manual', 'Manual'),
('preprinted', 'Preprinted'),
('online', 'Online'),
# Added by another module
# ('electronic', 'Electronic'),
# ('fiscal_printer', 'Fiscal Printer'),
],
'Type',
default='manual',
required=True,
)
name = fields.Char(
compute='get_name',
)
number = fields.Integer(
'Number', required=True
)
company_id = fields.Many2one(
'res.company', 'Company', required=True,
default=lambda self: self.env['res.company']._company_default_get(
'afip.point_of_sale')
)
journal_ids = fields.One2many(
'account.journal',
'point_of_sale_id',
'Journals',
)
document_sequence_type = fields.Selection(
[('own_sequence', 'Own Sequence'),
('same_sequence', 'Same Invoice Sequence')],
string='Document Sequence Type',
default='own_sequence',
required=True,
help="Use own sequence or invoice sequence on Debit and Credit Notes?"
)
journal_document_class_ids = fields.One2many(
'account.journal.afip_document_class',
compute='get_journal_document_class_ids',
string='Documents Classes',
)
@api.one
@api.depends('type', 'sufix', 'prefix', 'number')
def get_name(self):
# TODO: improve this so it takes the translated label from the selection
if self.type == 'manual':
name = 'Manual'
elif self.type == 'preprinted':
<|fim_middle|>
elif self.type == 'online':
name = 'Online'
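# The 'electronic' type is added to the selection by another module.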
elif self.type == 'electronic':
name = 'Electronica'
if self.prefix:
name = '%s %s' % (self.prefix, name)
if self.sufix:
name = '%s %s' % (name, self.sufix)
name = '%04d - %s' % (self.number, name)
self.name = name
@api.one
@api.depends('journal_ids.journal_document_class_ids')
def get_journal_document_class_ids(self):
journal_document_class_ids = self.env[
'account.journal.afip_document_class'].search([
('journal_id.point_of_sale_id', '=', self.id)])
self.journal_document_class_ids = journal_document_class_ids
_sql_constraints = [('number_unique', 'unique(number, company_id)',
'Number Must be Unique per Company!'), ]
class afip_document_class(models.Model):
_name = 'afip.document_class'
_description = 'Afip Document Class'
name = fields.Char(
'Name', size=120)
doc_code_prefix = fields.Char(
'Document Code Prefix', help="Prefix for Document Codes on Invoices \
and Account Moves. E.g. 'FA ' will build Document Number 'FA 0001-0000001'")
afip_code = fields.Integer(
'AFIP Code', required=True)
document_letter_id = fields.Many2one(
'afip.document_letter', 'Document Letter')
report_name = fields.Char(
'Name on Reports',
help='Name that will be printed in reports, for example "CREDIT NOTE"')
document_type = fields.Selection([
('invoice', 'Invoices'),
('credit_note', 'Credit Notes'),
('debit_note', 'Debit Notes'),
('receipt', 'Receipt'),
('ticket', 'Ticket'),
('in_document', 'In Document'),
('other_document', 'Other Documents')
],
string='Document Type',
help='It defines some behaviours on automatic journal selection and\
in menus where it is shown.')
active = fields.Boolean(
'Active', default=True)
class afip_document_letter(models.Model):
_name = 'afip.document_letter'
_description = 'Afip Document Letter'
name = fields.Char(
'Name', size=64, required=True)
afip_document_class_ids = fields.One2many(
'afip.document_class', 'document_letter_id', 'Afip Document Classes')
issuer_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_issuer_rel',
'letter_id', 'responsability_id', 'Issuers',)
receptor_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_receptor_rel',
'letter_id', 'responsability_id', 'Receptors',)
active = fields.Boolean(
'Active', default=True)
vat_discriminated = fields.Boolean(
'Vat Discriminated on Invoices?',
help="If True, the vat will be discriminated on invoice report.")
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'), ]
class afip_responsability(models.Model):
_name = 'afip.responsability'
_description = 'AFIP VAT Responsibility'
name = fields.Char(
'Name', size=64, required=True)
code = fields.Char(
'Code', size=8, required=True)
active = fields.Boolean(
'Active', default=True)
issued_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_issuer_rel',
'responsability_id', 'letter_id', 'Issued Document Letters')
received_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_receptor_rel',
'responsability_id', 'letter_id', 'Received Document Letters')
vat_tax_required_on_sales_invoices = fields.Boolean(
'VAT Tax Required on Sales Invoices?',
help='If True, then a VAT tax is mandatory on each sale invoice for companies of this responsibility',
)
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'),
('code', 'unique(code)', 'Code must be unique!')]
class afip_document_type(models.Model):
_name = 'afip.document_type'
_description = 'AFIP document types'
name = fields.Char(
'Name', size=120, required=True)
code = fields.Char(
'Code', size=16, required=True)
afip_code = fields.Integer(
'AFIP Code', required=True)
active = fields.Boolean(
'Active', default=True)
<|fim▁end|> | name = 'Preimpresa' |
<|file_name|>afip.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, models, api, _
from openerp.exceptions import Warning
import logging
_logger = logging.getLogger(__name__)
class afip_incoterm(models.Model):
_name = 'afip.incoterm'
_description = 'Afip Incoterm'
afip_code = fields.Char(
'Code', required=True)
name = fields.Char(
'Name', required=True)
class afip_point_of_sale(models.Model):
_name = 'afip.point_of_sale'
_description = 'Afip Point Of Sale'
prefix = fields.Char(
'Prefix'
)
sufix = fields.Char(
'Suffix'
)
type = fields.Selection([
('manual', 'Manual'),
('preprinted', 'Preprinted'),
('online', 'Online'),
# Added by another module
# ('electronic', 'Electronic'),
# ('fiscal_printer', 'Fiscal Printer'),
],
'Type',
default='manual',
required=True,
)
name = fields.Char(
compute='get_name',
)
number = fields.Integer(
'Number', required=True
)
company_id = fields.Many2one(
'res.company', 'Company', required=True,
default=lambda self: self.env['res.company']._company_default_get(
'afip.point_of_sale')
)
journal_ids = fields.One2many(
'account.journal',
'point_of_sale_id',
'Journals',
)
document_sequence_type = fields.Selection(
[('own_sequence', 'Own Sequence'),
('same_sequence', 'Same Invoice Sequence')],
string='Document Sequence Type',
default='own_sequence',
required=True,
help="Use own sequence or invoice sequence on Debit and Credit Notes?"
)
journal_document_class_ids = fields.One2many(
'account.journal.afip_document_class',
compute='get_journal_document_class_ids',
string='Documents Classes',
)
@api.one
@api.depends('type', 'sufix', 'prefix', 'number')
def get_name(self):
# TODO: improve this so it takes the translated label from the selection
if self.type == 'manual':
name = 'Manual'
elif self.type == 'preprinted':
name = 'Preimpresa'
elif self.type == 'online':
<|fim_middle|>
elif self.type == 'electronic':
name = 'Electronica'
if self.prefix:
name = '%s %s' % (self.prefix, name)
if self.sufix:
name = '%s %s' % (name, self.sufix)
name = '%04d - %s' % (self.number, name)
self.name = name
@api.one
@api.depends('journal_ids.journal_document_class_ids')
def get_journal_document_class_ids(self):
journal_document_class_ids = self.env[
'account.journal.afip_document_class'].search([
('journal_id.point_of_sale_id', '=', self.id)])
self.journal_document_class_ids = journal_document_class_ids
_sql_constraints = [('number_unique', 'unique(number, company_id)',
'Number Must be Unique per Company!'), ]
class afip_document_class(models.Model):
_name = 'afip.document_class'
_description = 'Afip Document Class'
name = fields.Char(
'Name', size=120)
doc_code_prefix = fields.Char(
'Document Code Prefix', help="Prefix for Document Codes on Invoices \
and Account Moves. E.g. 'FA ' will build Document Number 'FA 0001-0000001'")
afip_code = fields.Integer(
'AFIP Code', required=True)
document_letter_id = fields.Many2one(
'afip.document_letter', 'Document Letter')
report_name = fields.Char(
'Name on Reports',
help='Name that will be printed in reports, for example "CREDIT NOTE"')
document_type = fields.Selection([
('invoice', 'Invoices'),
('credit_note', 'Credit Notes'),
('debit_note', 'Debit Notes'),
('receipt', 'Receipt'),
('ticket', 'Ticket'),
('in_document', 'In Document'),
('other_document', 'Other Documents')
],
string='Document Type',
help='It defines some behaviours on automatic journal selection and\
in menus where it is shown.')
active = fields.Boolean(
'Active', default=True)
class afip_document_letter(models.Model):
_name = 'afip.document_letter'
_description = 'Afip Document Letter'
name = fields.Char(
'Name', size=64, required=True)
afip_document_class_ids = fields.One2many(
'afip.document_class', 'document_letter_id', 'Afip Document Classes')
issuer_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_issuer_rel',
'letter_id', 'responsability_id', 'Issuers',)
receptor_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_receptor_rel',
'letter_id', 'responsability_id', 'Receptors',)
active = fields.Boolean(
'Active', default=True)
vat_discriminated = fields.Boolean(
'Vat Discriminated on Invoices?',
help="If True, the vat will be discriminated on invoice report.")
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'), ]
class afip_responsability(models.Model):
_name = 'afip.responsability'
_description = 'AFIP VAT Responsibility'
name = fields.Char(
'Name', size=64, required=True)
code = fields.Char(
'Code', size=8, required=True)
active = fields.Boolean(
'Active', default=True)
issued_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_issuer_rel',
'responsability_id', 'letter_id', 'Issued Document Letters')
received_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_receptor_rel',
'responsability_id', 'letter_id', 'Received Document Letters')
vat_tax_required_on_sales_invoices = fields.Boolean(
'VAT Tax Required on Sales Invoices?',
help='If True, then a VAT tax is mandatory on each sale invoice for companies of this responsibility',
)
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'),
('code', 'unique(code)', 'Code must be unique!')]
class afip_document_type(models.Model):
_name = 'afip.document_type'
_description = 'AFIP document types'
name = fields.Char(
'Name', size=120, required=True)
code = fields.Char(
'Code', size=16, required=True)
afip_code = fields.Integer(
'AFIP Code', required=True)
active = fields.Boolean(
'Active', default=True)
<|fim▁end|> | name = 'Online' |
<|file_name|>afip.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, models, api, _
from openerp.exceptions import Warning
import logging
_logger = logging.getLogger(__name__)
class afip_incoterm(models.Model):
_name = 'afip.incoterm'
_description = 'Afip Incoterm'
afip_code = fields.Char(
'Code', required=True)
name = fields.Char(
'Name', required=True)
class afip_point_of_sale(models.Model):
_name = 'afip.point_of_sale'
_description = 'Afip Point Of Sale'
prefix = fields.Char(
'Prefix'
)
sufix = fields.Char(
'Sufix'
)
type = fields.Selection([
('manual', 'Manual'),
('preprinted', 'Preprinted'),
('online', 'Online'),
# Added by another module
# ('electronic', 'Electronic'),
# ('fiscal_printer', 'Fiscal Printer'),
],
'Type',
default='manual',
required=True,
)
name = fields.Char(
compute='get_name',
)
number = fields.Integer(
'Number', required=True
)
company_id = fields.Many2one(
'res.company', 'Company', required=True,
default=lambda self: self.env['res.company']._company_default_get(
'afip.point_of_sale')
)
journal_ids = fields.One2many(
'account.journal',
'point_of_sale_id',
'Journals',
)
document_sequence_type = fields.Selection(
[('own_sequence', 'Own Sequence'),
('same_sequence', 'Same Invoice Sequence')],
string='Document Sequence Type',
default='own_sequence',
required=True,
help="Use own sequence or invoice sequence on Debit and Credit Notes?"
)
journal_document_class_ids = fields.One2many(
'account.journal.afip_document_class',
compute='get_journal_document_class_ids',
string='Documents Classes',
)
@api.one
@api.depends('type', 'sufix', 'prefix', 'number')
def get_name(self):
# TODO: improve this and take the translated label from the selection
if self.type == 'manual':
name = 'Manual'
elif self.type == 'preprinted':
name = 'Preimpresa'
elif self.type == 'online':
name = 'Online'
elif self.type == 'electronic':
<|fim_middle|>
if self.prefix:
name = '%s %s' % (self.prefix, name)
if self.sufix:
name = '%s %s' % (name, self.sufix)
name = '%04d - %s' % (self.number, name)
self.name = name
@api.one
@api.depends('journal_ids.journal_document_class_ids')
def get_journal_document_class_ids(self):
journal_document_class_ids = self.env[
'account.journal.afip_document_class'].search([
('journal_id.point_of_sale_id', '=', self.id)])
self.journal_document_class_ids = journal_document_class_ids
_sql_constraints = [('number_unique', 'unique(number, company_id)',
'Number Must be Unique per Company!'), ]
class afip_document_class(models.Model):
_name = 'afip.document_class'
_description = 'Afip Document Class'
name = fields.Char(
'Name', size=120)
doc_code_prefix = fields.Char(
'Document Code Prefix', help="Prefix for Documents Codes on Invoices \
and Account Moves. For eg. 'FA ' will build 'FA 0001-0000001' Document Number")
afip_code = fields.Integer(
'AFIP Code', required=True)
document_letter_id = fields.Many2one(
'afip.document_letter', 'Document Letter')
report_name = fields.Char(
'Name on Reports',
help='Name that will be printed in reports, for example "CREDIT NOTE"')
document_type = fields.Selection([
('invoice', 'Invoices'),
('credit_note', 'Credit Notes'),
('debit_note', 'Debit Notes'),
('receipt', 'Receipt'),
('ticket', 'Ticket'),
('in_document', 'In Document'),
('other_document', 'Other Documents')
],
string='Document Type',
help='It defines some behaviours on automatic journal selection and\
in menus where it is shown.')
active = fields.Boolean(
'Active', default=True)
class afip_document_letter(models.Model):
_name = 'afip.document_letter'
_description = 'Afip Document letter'
name = fields.Char(
'Name', size=64, required=True)
afip_document_class_ids = fields.One2many(
'afip.document_class', 'document_letter_id', 'Afip Document Classes')
issuer_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_issuer_rel',
'letter_id', 'responsability_id', 'Issuers',)
receptor_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_receptor_rel',
'letter_id', 'responsability_id', 'Receptors',)
active = fields.Boolean(
'Active', default=True)
vat_discriminated = fields.Boolean(
'Vat Discriminated on Invoices?',
help="If True, the vat will be discriminated on invoice report.")
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'), ]
class afip_responsability(models.Model):
_name = 'afip.responsability'
_description = 'AFIP VAT Responsability'
name = fields.Char(
'Name', size=64, required=True)
code = fields.Char(
'Code', size=8, required=True)
active = fields.Boolean(
'Active', default=True)
issued_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_issuer_rel',
'responsability_id', 'letter_id', 'Issued Document Letters')
received_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_receptor_rel',
'responsability_id', 'letter_id', 'Received Document Letters')
vat_tax_required_on_sales_invoices = fields.Boolean(
'VAT Tax Required on Sales Invoices?',
help='If True, then a vat tax is mandatory on each sale invoice for companies of this responsibility',
)
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'),
('code', 'unique(code)', 'Code must be unique!')]
class afip_document_type(models.Model):
_name = 'afip.document_type'
_description = 'AFIP document types'
name = fields.Char(
'Name', size=120, required=True)
code = fields.Char(
'Code', size=16, required=True)
afip_code = fields.Integer(
'AFIP Code', required=True)
active = fields.Boolean(
'Active', default=True)
<|fim▁end|> | name = 'Electronica' |
<|file_name|>afip.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, models, api, _
from openerp.exceptions import Warning
import logging
_logger = logging.getLogger(__name__)
class afip_incoterm(models.Model):
_name = 'afip.incoterm'
_description = 'Afip Incoterm'
afip_code = fields.Char(
'Code', required=True)
name = fields.Char(
'Name', required=True)
class afip_point_of_sale(models.Model):
_name = 'afip.point_of_sale'
_description = 'Afip Point Of Sale'
prefix = fields.Char(
'Prefix'
)
sufix = fields.Char(
'Sufix'
)
type = fields.Selection([
('manual', 'Manual'),
('preprinted', 'Preprinted'),
('online', 'Online'),
# Added by another module
# ('electronic', 'Electronic'),
# ('fiscal_printer', 'Fiscal Printer'),
],
'Type',
default='manual',
required=True,
)
name = fields.Char(
compute='get_name',
)
number = fields.Integer(
'Number', required=True
)
company_id = fields.Many2one(
'res.company', 'Company', required=True,
default=lambda self: self.env['res.company']._company_default_get(
'afip.point_of_sale')
)
journal_ids = fields.One2many(
'account.journal',
'point_of_sale_id',
'Journals',
)
document_sequence_type = fields.Selection(
[('own_sequence', 'Own Sequence'),
('same_sequence', 'Same Invoice Sequence')],
string='Document Sequence Type',
default='own_sequence',
required=True,
help="Use own sequence or invoice sequence on Debit and Credit Notes?"
)
journal_document_class_ids = fields.One2many(
'account.journal.afip_document_class',
compute='get_journal_document_class_ids',
string='Documents Classes',
)
@api.one
@api.depends('type', 'sufix', 'prefix', 'number')
def get_name(self):
# TODO: improve this and take the translated label from the selection
if self.type == 'manual':
name = 'Manual'
elif self.type == 'preprinted':
name = 'Preimpresa'
elif self.type == 'online':
name = 'Online'
elif self.type == 'electronic':
name = 'Electronica'
if self.prefix:
<|fim_middle|>
if self.sufix:
name = '%s %s' % (name, self.sufix)
name = '%04d - %s' % (self.number, name)
self.name = name
@api.one
@api.depends('journal_ids.journal_document_class_ids')
def get_journal_document_class_ids(self):
journal_document_class_ids = self.env[
'account.journal.afip_document_class'].search([
('journal_id.point_of_sale_id', '=', self.id)])
self.journal_document_class_ids = journal_document_class_ids
_sql_constraints = [('number_unique', 'unique(number, company_id)',
'Number Must be Unique per Company!'), ]
class afip_document_class(models.Model):
_name = 'afip.document_class'
_description = 'Afip Document Class'
name = fields.Char(
'Name', size=120)
doc_code_prefix = fields.Char(
'Document Code Prefix', help="Prefix for Documents Codes on Invoices \
and Account Moves. For eg. 'FA ' will build 'FA 0001-0000001' Document Number")
afip_code = fields.Integer(
'AFIP Code', required=True)
document_letter_id = fields.Many2one(
'afip.document_letter', 'Document Letter')
report_name = fields.Char(
'Name on Reports',
help='Name that will be printed in reports, for example "CREDIT NOTE"')
document_type = fields.Selection([
('invoice', 'Invoices'),
('credit_note', 'Credit Notes'),
('debit_note', 'Debit Notes'),
('receipt', 'Receipt'),
('ticket', 'Ticket'),
('in_document', 'In Document'),
('other_document', 'Other Documents')
],
string='Document Type',
help='It defines some behaviours on automatic journal selection and\
in menus where it is shown.')
active = fields.Boolean(
'Active', default=True)
class afip_document_letter(models.Model):
_name = 'afip.document_letter'
_description = 'Afip Document letter'
name = fields.Char(
'Name', size=64, required=True)
afip_document_class_ids = fields.One2many(
'afip.document_class', 'document_letter_id', 'Afip Document Classes')
issuer_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_issuer_rel',
'letter_id', 'responsability_id', 'Issuers',)
receptor_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_receptor_rel',
'letter_id', 'responsability_id', 'Receptors',)
active = fields.Boolean(
'Active', default=True)
vat_discriminated = fields.Boolean(
'Vat Discriminated on Invoices?',
help="If True, the vat will be discriminated on invoice report.")
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'), ]
class afip_responsability(models.Model):
_name = 'afip.responsability'
_description = 'AFIP VAT Responsability'
name = fields.Char(
'Name', size=64, required=True)
code = fields.Char(
'Code', size=8, required=True)
active = fields.Boolean(
'Active', default=True)
issued_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_issuer_rel',
'responsability_id', 'letter_id', 'Issued Document Letters')
received_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_receptor_rel',
'responsability_id', 'letter_id', 'Received Document Letters')
vat_tax_required_on_sales_invoices = fields.Boolean(
'VAT Tax Required on Sales Invoices?',
help='If True, then a vat tax is mandatory on each sale invoice for companies of this responsibility',
)
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'),
('code', 'unique(code)', 'Code must be unique!')]
class afip_document_type(models.Model):
_name = 'afip.document_type'
_description = 'AFIP document types'
name = fields.Char(
'Name', size=120, required=True)
code = fields.Char(
'Code', size=16, required=True)
afip_code = fields.Integer(
'AFIP Code', required=True)
active = fields.Boolean(
'Active', default=True)
<|fim▁end|> | name = '%s %s' % (self.prefix, name) |
<|file_name|>afip.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, models, api, _
from openerp.exceptions import Warning
import logging
_logger = logging.getLogger(__name__)
class afip_incoterm(models.Model):
_name = 'afip.incoterm'
_description = 'Afip Incoterm'
afip_code = fields.Char(
'Code', required=True)
name = fields.Char(
'Name', required=True)
class afip_point_of_sale(models.Model):
_name = 'afip.point_of_sale'
_description = 'Afip Point Of Sale'
prefix = fields.Char(
'Prefix'
)
sufix = fields.Char(
'Sufix'
)
type = fields.Selection([
('manual', 'Manual'),
('preprinted', 'Preprinted'),
('online', 'Online'),
# Added by another module
# ('electronic', 'Electronic'),
# ('fiscal_printer', 'Fiscal Printer'),
],
'Type',
default='manual',
required=True,
)
name = fields.Char(
compute='get_name',
)
number = fields.Integer(
'Number', required=True
)
company_id = fields.Many2one(
'res.company', 'Company', required=True,
default=lambda self: self.env['res.company']._company_default_get(
'afip.point_of_sale')
)
journal_ids = fields.One2many(
'account.journal',
'point_of_sale_id',
'Journals',
)
document_sequence_type = fields.Selection(
[('own_sequence', 'Own Sequence'),
('same_sequence', 'Same Invoice Sequence')],
string='Document Sequence Type',
default='own_sequence',
required=True,
help="Use own sequence or invoice sequence on Debit and Credit Notes?"
)
journal_document_class_ids = fields.One2many(
'account.journal.afip_document_class',
compute='get_journal_document_class_ids',
string='Documents Classes',
)
@api.one
@api.depends('type', 'sufix', 'prefix', 'number')
def get_name(self):
# TODO: improve this and take the translated label from the selection
if self.type == 'manual':
name = 'Manual'
elif self.type == 'preprinted':
name = 'Preimpresa'
elif self.type == 'online':
name = 'Online'
elif self.type == 'electronic':
name = 'Electronica'
if self.prefix:
name = '%s %s' % (self.prefix, name)
if self.sufix:
<|fim_middle|>
name = '%04d - %s' % (self.number, name)
self.name = name
@api.one
@api.depends('journal_ids.journal_document_class_ids')
def get_journal_document_class_ids(self):
journal_document_class_ids = self.env[
'account.journal.afip_document_class'].search([
('journal_id.point_of_sale_id', '=', self.id)])
self.journal_document_class_ids = journal_document_class_ids
_sql_constraints = [('number_unique', 'unique(number, company_id)',
'Number Must be Unique per Company!'), ]
class afip_document_class(models.Model):
_name = 'afip.document_class'
_description = 'Afip Document Class'
name = fields.Char(
'Name', size=120)
doc_code_prefix = fields.Char(
'Document Code Prefix', help="Prefix for Documents Codes on Invoices \
and Account Moves. For eg. 'FA ' will build 'FA 0001-0000001' Document Number")
afip_code = fields.Integer(
'AFIP Code', required=True)
document_letter_id = fields.Many2one(
'afip.document_letter', 'Document Letter')
report_name = fields.Char(
'Name on Reports',
help='Name that will be printed in reports, for example "CREDIT NOTE"')
document_type = fields.Selection([
('invoice', 'Invoices'),
('credit_note', 'Credit Notes'),
('debit_note', 'Debit Notes'),
('receipt', 'Receipt'),
('ticket', 'Ticket'),
('in_document', 'In Document'),
('other_document', 'Other Documents')
],
string='Document Type',
help='It defines some behaviours on automatic journal selection and\
in menus where it is shown.')
active = fields.Boolean(
'Active', default=True)
class afip_document_letter(models.Model):
_name = 'afip.document_letter'
_description = 'Afip Document letter'
name = fields.Char(
'Name', size=64, required=True)
afip_document_class_ids = fields.One2many(
'afip.document_class', 'document_letter_id', 'Afip Document Classes')
issuer_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_issuer_rel',
'letter_id', 'responsability_id', 'Issuers',)
receptor_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_receptor_rel',
'letter_id', 'responsability_id', 'Receptors',)
active = fields.Boolean(
'Active', default=True)
vat_discriminated = fields.Boolean(
'Vat Discriminated on Invoices?',
help="If True, the vat will be discriminated on invoice report.")
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'), ]
class afip_responsability(models.Model):
_name = 'afip.responsability'
_description = 'AFIP VAT Responsability'
name = fields.Char(
'Name', size=64, required=True)
code = fields.Char(
'Code', size=8, required=True)
active = fields.Boolean(
'Active', default=True)
issued_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_issuer_rel',
'responsability_id', 'letter_id', 'Issued Document Letters')
received_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_receptor_rel',
'responsability_id', 'letter_id', 'Received Document Letters')
vat_tax_required_on_sales_invoices = fields.Boolean(
'VAT Tax Required on Sales Invoices?',
help='If True, then a vat tax is mandatory on each sale invoice for companies of this responsibility',
)
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'),
('code', 'unique(code)', 'Code must be unique!')]
class afip_document_type(models.Model):
_name = 'afip.document_type'
_description = 'AFIP document types'
name = fields.Char(
'Name', size=120, required=True)
code = fields.Char(
'Code', size=16, required=True)
afip_code = fields.Integer(
'AFIP Code', required=True)
active = fields.Boolean(
'Active', default=True)
<|fim▁end|> | name = '%s %s' % (name, self.sufix) |
<|file_name|>afip.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, models, api, _
from openerp.exceptions import Warning
import logging
_logger = logging.getLogger(__name__)
class afip_incoterm(models.Model):
_name = 'afip.incoterm'
_description = 'Afip Incoterm'
afip_code = fields.Char(
'Code', required=True)
name = fields.Char(
'Name', required=True)
class afip_point_of_sale(models.Model):
_name = 'afip.point_of_sale'
_description = 'Afip Point Of Sale'
prefix = fields.Char(
'Prefix'
)
sufix = fields.Char(
'Sufix'
)
type = fields.Selection([
('manual', 'Manual'),
('preprinted', 'Preprinted'),
('online', 'Online'),
# Added by another module
# ('electronic', 'Electronic'),
# ('fiscal_printer', 'Fiscal Printer'),
],
'Type',
default='manual',
required=True,
)
name = fields.Char(
compute='get_name',
)
number = fields.Integer(
'Number', required=True
)
company_id = fields.Many2one(
'res.company', 'Company', required=True,
default=lambda self: self.env['res.company']._company_default_get(
'afip.point_of_sale')
)
journal_ids = fields.One2many(
'account.journal',
'point_of_sale_id',
'Journals',
)
document_sequence_type = fields.Selection(
[('own_sequence', 'Own Sequence'),
('same_sequence', 'Same Invoice Sequence')],
string='Document Sequence Type',
default='own_sequence',
required=True,
help="Use own sequence or invoice sequence on Debit and Credit Notes?"
)
journal_document_class_ids = fields.One2many(
'account.journal.afip_document_class',
compute='get_journal_document_class_ids',
string='Documents Classes',
)
@api.one
@api.depends('type', 'sufix', 'prefix', 'number')
def <|fim_middle|>(self):
# TODO: improve this and take the translated label from the selection
if self.type == 'manual':
name = 'Manual'
elif self.type == 'preprinted':
name = 'Preimpresa'
elif self.type == 'online':
name = 'Online'
elif self.type == 'electronic':
name = 'Electronica'
if self.prefix:
name = '%s %s' % (self.prefix, name)
if self.sufix:
name = '%s %s' % (name, self.sufix)
name = '%04d - %s' % (self.number, name)
self.name = name
@api.one
@api.depends('journal_ids.journal_document_class_ids')
def get_journal_document_class_ids(self):
journal_document_class_ids = self.env[
'account.journal.afip_document_class'].search([
('journal_id.point_of_sale_id', '=', self.id)])
self.journal_document_class_ids = journal_document_class_ids
_sql_constraints = [('number_unique', 'unique(number, company_id)',
'Number Must be Unique per Company!'), ]
class afip_document_class(models.Model):
_name = 'afip.document_class'
_description = 'Afip Document Class'
name = fields.Char(
'Name', size=120)
doc_code_prefix = fields.Char(
'Document Code Prefix', help="Prefix for Documents Codes on Invoices \
and Account Moves. For eg. 'FA ' will build 'FA 0001-0000001' Document Number")
afip_code = fields.Integer(
'AFIP Code', required=True)
document_letter_id = fields.Many2one(
'afip.document_letter', 'Document Letter')
report_name = fields.Char(
'Name on Reports',
help='Name that will be printed in reports, for example "CREDIT NOTE"')
document_type = fields.Selection([
('invoice', 'Invoices'),
('credit_note', 'Credit Notes'),
('debit_note', 'Debit Notes'),
('receipt', 'Receipt'),
('ticket', 'Ticket'),
('in_document', 'In Document'),
('other_document', 'Other Documents')
],
string='Document Type',
help='It defines some behaviours on automatic journal selection and\
in menus where it is shown.')
active = fields.Boolean(
'Active', default=True)
class afip_document_letter(models.Model):
_name = 'afip.document_letter'
_description = 'Afip Document letter'
name = fields.Char(
'Name', size=64, required=True)
afip_document_class_ids = fields.One2many(
'afip.document_class', 'document_letter_id', 'Afip Document Classes')
issuer_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_issuer_rel',
'letter_id', 'responsability_id', 'Issuers',)
receptor_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_receptor_rel',
'letter_id', 'responsability_id', 'Receptors',)
active = fields.Boolean(
'Active', default=True)
vat_discriminated = fields.Boolean(
'Vat Discriminated on Invoices?',
help="If True, the vat will be discriminated on invoice report.")
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'), ]
class afip_responsability(models.Model):
_name = 'afip.responsability'
_description = 'AFIP VAT Responsability'
name = fields.Char(
'Name', size=64, required=True)
code = fields.Char(
'Code', size=8, required=True)
active = fields.Boolean(
'Active', default=True)
issued_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_issuer_rel',
'responsability_id', 'letter_id', 'Issued Document Letters')
received_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_receptor_rel',
'responsability_id', 'letter_id', 'Received Document Letters')
vat_tax_required_on_sales_invoices = fields.Boolean(
'VAT Tax Required on Sales Invoices?',
help='If True, then a vat tax is mandatory on each sale invoice for companies of this responsibility',
)
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'),
('code', 'unique(code)', 'Code must be unique!')]
class afip_document_type(models.Model):
_name = 'afip.document_type'
_description = 'AFIP document types'
name = fields.Char(
'Name', size=120, required=True)
code = fields.Char(
'Code', size=16, required=True)
afip_code = fields.Integer(
'AFIP Code', required=True)
active = fields.Boolean(
'Active', default=True)
<|fim▁end|> | get_name |
<|file_name|>afip.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, models, api, _
from openerp.exceptions import Warning
import logging
_logger = logging.getLogger(__name__)
class afip_incoterm(models.Model):
_name = 'afip.incoterm'
_description = 'Afip Incoterm'
afip_code = fields.Char(
'Code', required=True)
name = fields.Char(
'Name', required=True)
class afip_point_of_sale(models.Model):
_name = 'afip.point_of_sale'
_description = 'Afip Point Of Sale'
prefix = fields.Char(
'Prefix'
)
sufix = fields.Char(
'Sufix'
)
type = fields.Selection([
('manual', 'Manual'),
('preprinted', 'Preprinted'),
('online', 'Online'),
# Added by another module
# ('electronic', 'Electronic'),
# ('fiscal_printer', 'Fiscal Printer'),
],
'Type',
default='manual',
required=True,
)
name = fields.Char(
compute='get_name',
)
number = fields.Integer(
'Number', required=True
)
company_id = fields.Many2one(
'res.company', 'Company', required=True,
default=lambda self: self.env['res.company']._company_default_get(
'afip.point_of_sale')
)
journal_ids = fields.One2many(
'account.journal',
'point_of_sale_id',
'Journals',
)
document_sequence_type = fields.Selection(
[('own_sequence', 'Own Sequence'),
('same_sequence', 'Same Invoice Sequence')],
string='Document Sequence Type',
default='own_sequence',
required=True,
help="Use own sequence or invoice sequence on Debit and Credit Notes?"
)
journal_document_class_ids = fields.One2many(
'account.journal.afip_document_class',
compute='get_journal_document_class_ids',
string='Documents Classes',
)
@api.one
@api.depends('type', 'sufix', 'prefix', 'number')
def get_name(self):
# TODO: improve this and take the translated label from the selection
if self.type == 'manual':
name = 'Manual'
elif self.type == 'preprinted':
name = 'Preimpresa'
elif self.type == 'online':
name = 'Online'
elif self.type == 'electronic':
name = 'Electronica'
if self.prefix:
name = '%s %s' % (self.prefix, name)
if self.sufix:
name = '%s %s' % (name, self.sufix)
name = '%04d - %s' % (self.number, name)
self.name = name
@api.one
@api.depends('journal_ids.journal_document_class_ids')
def <|fim_middle|>(self):
journal_document_class_ids = self.env[
'account.journal.afip_document_class'].search([
('journal_id.point_of_sale_id', '=', self.id)])
self.journal_document_class_ids = journal_document_class_ids
_sql_constraints = [('number_unique', 'unique(number, company_id)',
'Number Must be Unique per Company!'), ]
class afip_document_class(models.Model):
_name = 'afip.document_class'
_description = 'Afip Document Class'
name = fields.Char(
'Name', size=120)
doc_code_prefix = fields.Char(
'Document Code Prefix', help="Prefix for Documents Codes on Invoices \
and Account Moves. For eg. 'FA ' will build 'FA 0001-0000001' Document Number")
afip_code = fields.Integer(
'AFIP Code', required=True)
document_letter_id = fields.Many2one(
'afip.document_letter', 'Document Letter')
report_name = fields.Char(
'Name on Reports',
help='Name that will be printed in reports, for example "CREDIT NOTE"')
document_type = fields.Selection([
('invoice', 'Invoices'),
('credit_note', 'Credit Notes'),
('debit_note', 'Debit Notes'),
('receipt', 'Receipt'),
('ticket', 'Ticket'),
('in_document', 'In Document'),
('other_document', 'Other Documents')
],
string='Document Type',
help='It defines some behaviours on automatic journal selection and\
in menus where it is shown.')
active = fields.Boolean(
'Active', default=True)
class afip_document_letter(models.Model):
_name = 'afip.document_letter'
_description = 'Afip Document letter'
name = fields.Char(
'Name', size=64, required=True)
afip_document_class_ids = fields.One2many(
'afip.document_class', 'document_letter_id', 'Afip Document Classes')
issuer_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_issuer_rel',
'letter_id', 'responsability_id', 'Issuers',)
receptor_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_receptor_rel',
'letter_id', 'responsability_id', 'Receptors',)
active = fields.Boolean(
'Active', default=True)
vat_discriminated = fields.Boolean(
'Vat Discriminated on Invoices?',
help="If True, the vat will be discriminated on invoice report.")
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'), ]
class afip_responsability(models.Model):
_name = 'afip.responsability'
_description = 'AFIP VAT Responsability'
name = fields.Char(
'Name', size=64, required=True)
code = fields.Char(
'Code', size=8, required=True)
active = fields.Boolean(
'Active', default=True)
issued_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_issuer_rel',
'responsability_id', 'letter_id', 'Issued Document Letters')
received_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_receptor_rel',
'responsability_id', 'letter_id', 'Received Document Letters')
vat_tax_required_on_sales_invoices = fields.Boolean(
'VAT Tax Required on Sales Invoices?',
help='If True, then a vat tax is mandatory on each sale invoice for companies of this responsibility',
)
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'),
('code', 'unique(code)', 'Code must be unique!')]
class afip_document_type(models.Model):
_name = 'afip.document_type'
_description = 'AFIP document types'
name = fields.Char(
'Name', size=120, required=True)
code = fields.Char(
'Code', size=16, required=True)
afip_code = fields.Integer(
'AFIP Code', required=True)
active = fields.Boolean(
'Active', default=True)
<|fim▁end|> | get_journal_document_class_ids |
<|file_name|>io.py<|end_file_name|><|fim▁begin|>from karld.loadump import dump_dicts_to_json_file
from karld.loadump import ensure_dir
from karld.loadump import ensure_file_path_dir<|fim▁hole|>from karld.loadump import i_get_csv_data
from karld.loadump import is_file_csv
from karld.loadump import i_get_json_data
from karld.loadump import is_file_json
from karld.loadump import raw_line_reader
from karld.loadump import split_csv_file
from karld.loadump import split_file
from karld.loadump import split_file_output
from karld.loadump import split_file_output_csv
from karld.loadump import split_file_output_json
from karld.loadump import write_as_csv
from karld.loadump import write_as_json<|fim▁end|> | |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .inventory import (
GetInventoryRequest,
Inventory,
ListInventoriesRequest,
ListInventoriesResponse,
InventoryView,
)
from .os_policy import OSPolicy
from .os_policy_assignment_reports import (
GetOSPolicyAssignmentReportRequest,
ListOSPolicyAssignmentReportsRequest,
ListOSPolicyAssignmentReportsResponse,
OSPolicyAssignmentReport,
)
from .os_policy_assignments import (
CreateOSPolicyAssignmentRequest,
DeleteOSPolicyAssignmentRequest,
GetOSPolicyAssignmentRequest,
ListOSPolicyAssignmentRevisionsRequest,
ListOSPolicyAssignmentRevisionsResponse,
ListOSPolicyAssignmentsRequest,
ListOSPolicyAssignmentsResponse,
OSPolicyAssignment,
OSPolicyAssignmentOperationMetadata,
UpdateOSPolicyAssignmentRequest,
)
from .osconfig_common import FixedOrPercent
from .patch_deployments import (
CreatePatchDeploymentRequest,
DeletePatchDeploymentRequest,
GetPatchDeploymentRequest,
ListPatchDeploymentsRequest,
ListPatchDeploymentsResponse,
MonthlySchedule,
OneTimeSchedule,
PatchDeployment,
PausePatchDeploymentRequest,
RecurringSchedule,
ResumePatchDeploymentRequest,
UpdatePatchDeploymentRequest,
WeekDayOfMonth,
WeeklySchedule,
)
from .patch_jobs import (
AptSettings,
CancelPatchJobRequest,
ExecStep,
ExecStepConfig,
ExecutePatchJobRequest,
GcsObject,
GetPatchJobRequest,
GooSettings,
Instance,
ListPatchJobInstanceDetailsRequest,
ListPatchJobInstanceDetailsResponse,
ListPatchJobsRequest,
ListPatchJobsResponse,
PatchConfig,
PatchInstanceFilter,
PatchJob,
PatchJobInstanceDetails,
PatchRollout,
WindowsUpdateSettings,
YumSettings,
ZypperSettings,
)
from .vulnerability import (
CVSSv3,
GetVulnerabilityReportRequest,
ListVulnerabilityReportsRequest,
ListVulnerabilityReportsResponse,
VulnerabilityReport,
)
__all__ = (
"GetInventoryRequest",
"Inventory",
"ListInventoriesRequest",
"ListInventoriesResponse",
"InventoryView",
"OSPolicy",
"GetOSPolicyAssignmentReportRequest",
"ListOSPolicyAssignmentReportsRequest",
"ListOSPolicyAssignmentReportsResponse",
"OSPolicyAssignmentReport",
"CreateOSPolicyAssignmentRequest",
"DeleteOSPolicyAssignmentRequest",
"GetOSPolicyAssignmentRequest",
"ListOSPolicyAssignmentRevisionsRequest",
"ListOSPolicyAssignmentRevisionsResponse",
"ListOSPolicyAssignmentsRequest",
"ListOSPolicyAssignmentsResponse",
"OSPolicyAssignment",
"OSPolicyAssignmentOperationMetadata",
"UpdateOSPolicyAssignmentRequest",
"FixedOrPercent",
"CreatePatchDeploymentRequest",
"DeletePatchDeploymentRequest",
"GetPatchDeploymentRequest",
"ListPatchDeploymentsRequest",
"ListPatchDeploymentsResponse",
"MonthlySchedule",
"OneTimeSchedule",
"PatchDeployment",
"PausePatchDeploymentRequest",
"RecurringSchedule",
"ResumePatchDeploymentRequest",
"UpdatePatchDeploymentRequest",
"WeekDayOfMonth",<|fim▁hole|> "ExecStep",
"ExecStepConfig",
"ExecutePatchJobRequest",
"GcsObject",
"GetPatchJobRequest",
"GooSettings",
"Instance",
"ListPatchJobInstanceDetailsRequest",
"ListPatchJobInstanceDetailsResponse",
"ListPatchJobsRequest",
"ListPatchJobsResponse",
"PatchConfig",
"PatchInstanceFilter",
"PatchJob",
"PatchJobInstanceDetails",
"PatchRollout",
"WindowsUpdateSettings",
"YumSettings",
"ZypperSettings",
"CVSSv3",
"GetVulnerabilityReportRequest",
"ListVulnerabilityReportsRequest",
"ListVulnerabilityReportsResponse",
"VulnerabilityReport",
)<|fim▁end|> | "WeeklySchedule",
"AptSettings",
"CancelPatchJobRequest", |
<|file_name|>conftest.py<|end_file_name|><|fim▁begin|>import pytest
from tests.base import Author, Post, Comment, Keyword, fake
def make_author():
return Author(
id=fake.random_int(),
first_name=fake.first_name(),
last_name=fake.last_name(),
twitter=fake.domain_word(),
)
def make_post(with_comments=True, with_author=True, with_keywords=True):
comments = [make_comment() for _ in range(2)] if with_comments else []
keywords = [make_keyword() for _ in range(3)] if with_keywords else []
author = make_author() if with_author else None
return Post(
id=fake.random_int(),
title=fake.catch_phrase(),
author=author,
author_id=author.id if with_author else None,
comments=comments,
keywords=keywords,
)
def make_comment(with_author=True):
author = make_author() if with_author else None
return Comment(id=fake.random_int(), body=fake.bs(), author=author)
def make_keyword():
return Keyword(keyword=fake.domain_word())<|fim▁hole|>
@pytest.fixture()
def author():
return make_author()
@pytest.fixture()
def authors():
return [make_author() for _ in range(3)]
@pytest.fixture()
def comments():
return [make_comment() for _ in range(3)]
@pytest.fixture()
def post():
return make_post()
@pytest.fixture()
def post_with_null_comment():
return make_post(with_comments=False)
@pytest.fixture()
def post_with_null_author():
return make_post(with_author=False)
@pytest.fixture()
def posts():
return [make_post() for _ in range(3)]<|fim▁end|> | |
<|file_name|>conftest.py<|end_file_name|><|fim▁begin|>import pytest
from tests.base import Author, Post, Comment, Keyword, fake
def make_author():
<|fim_middle|>
def make_post(with_comments=True, with_author=True, with_keywords=True):
comments = [make_comment() for _ in range(2)] if with_comments else []
keywords = [make_keyword() for _ in range(3)] if with_keywords else []
author = make_author() if with_author else None
return Post(
id=fake.random_int(),
title=fake.catch_phrase(),
author=author,
author_id=author.id if with_author else None,
comments=comments,
keywords=keywords,
)
def make_comment(with_author=True):
author = make_author() if with_author else None
return Comment(id=fake.random_int(), body=fake.bs(), author=author)
def make_keyword():
return Keyword(keyword=fake.domain_word())
@pytest.fixture()
def author():
return make_author()
@pytest.fixture()
def authors():
return [make_author() for _ in range(3)]
@pytest.fixture()
def comments():
return [make_comment() for _ in range(3)]
@pytest.fixture()
def post():
return make_post()
@pytest.fixture()
def post_with_null_comment():
return make_post(with_comments=False)
@pytest.fixture()
def post_with_null_author():
return make_post(with_author=False)
@pytest.fixture()
def posts():
return [make_post() for _ in range(3)]
<|fim▁end|> | return Author(
id=fake.random_int(),
first_name=fake.first_name(),
last_name=fake.last_name(),
twitter=fake.domain_word(),
) |
<|file_name|>conftest.py<|end_file_name|><|fim▁begin|>import pytest
from tests.base import Author, Post, Comment, Keyword, fake
def make_author():
return Author(
id=fake.random_int(),
first_name=fake.first_name(),
last_name=fake.last_name(),
twitter=fake.domain_word(),
)
def make_post(with_comments=True, with_author=True, with_keywords=True):
<|fim_middle|>
def make_comment(with_author=True):
author = make_author() if with_author else None
return Comment(id=fake.random_int(), body=fake.bs(), author=author)
def make_keyword():
return Keyword(keyword=fake.domain_word())
@pytest.fixture()
def author():
return make_author()
@pytest.fixture()
def authors():
return [make_author() for _ in range(3)]
@pytest.fixture()
def comments():
return [make_comment() for _ in range(3)]
@pytest.fixture()
def post():
return make_post()
@pytest.fixture()
def post_with_null_comment():
return make_post(with_comments=False)
@pytest.fixture()
def post_with_null_author():
return make_post(with_author=False)
@pytest.fixture()
def posts():
return [make_post() for _ in range(3)]
<|fim▁end|> | comments = [make_comment() for _ in range(2)] if with_comments else []
keywords = [make_keyword() for _ in range(3)] if with_keywords else []
author = make_author() if with_author else None
return Post(
id=fake.random_int(),
title=fake.catch_phrase(),
author=author,
author_id=author.id if with_author else None,
comments=comments,
keywords=keywords,
) |
<|file_name|>conftest.py<|end_file_name|><|fim▁begin|>import pytest
from tests.base import Author, Post, Comment, Keyword, fake
def make_author():
return Author(
id=fake.random_int(),
first_name=fake.first_name(),
last_name=fake.last_name(),
twitter=fake.domain_word(),
)
def make_post(with_comments=True, with_author=True, with_keywords=True):
comments = [make_comment() for _ in range(2)] if with_comments else []
keywords = [make_keyword() for _ in range(3)] if with_keywords else []
author = make_author() if with_author else None
return Post(
id=fake.random_int(),
title=fake.catch_phrase(),
author=author,
author_id=author.id if with_author else None,
comments=comments,
keywords=keywords,
)
def make_comment(with_author=True):
<|fim_middle|>
def make_keyword():
return Keyword(keyword=fake.domain_word())
@pytest.fixture()
def author():
return make_author()
@pytest.fixture()
def authors():
return [make_author() for _ in range(3)]
@pytest.fixture()
def comments():
return [make_comment() for _ in range(3)]
@pytest.fixture()
def post():
return make_post()
@pytest.fixture()
def post_with_null_comment():
return make_post(with_comments=False)
@pytest.fixture()
def post_with_null_author():
return make_post(with_author=False)
@pytest.fixture()
def posts():
return [make_post() for _ in range(3)]
<|fim▁end|> | author = make_author() if with_author else None
return Comment(id=fake.random_int(), body=fake.bs(), author=author) |
<|file_name|>conftest.py<|end_file_name|><|fim▁begin|>import pytest
from tests.base import Author, Post, Comment, Keyword, fake
def make_author():
return Author(
id=fake.random_int(),
first_name=fake.first_name(),
last_name=fake.last_name(),
twitter=fake.domain_word(),
)
def make_post(with_comments=True, with_author=True, with_keywords=True):
comments = [make_comment() for _ in range(2)] if with_comments else []
keywords = [make_keyword() for _ in range(3)] if with_keywords else []
author = make_author() if with_author else None
return Post(
id=fake.random_int(),
title=fake.catch_phrase(),
author=author,
author_id=author.id if with_author else None,
comments=comments,
keywords=keywords,
)
def make_comment(with_author=True):
author = make_author() if with_author else None
return Comment(id=fake.random_int(), body=fake.bs(), author=author)
def make_keyword():
<|fim_middle|>
@pytest.fixture()
def author():
return make_author()
@pytest.fixture()
def authors():
return [make_author() for _ in range(3)]
@pytest.fixture()
def comments():
return [make_comment() for _ in range(3)]
@pytest.fixture()
def post():
return make_post()
@pytest.fixture()
def post_with_null_comment():
return make_post(with_comments=False)
@pytest.fixture()
def post_with_null_author():
return make_post(with_author=False)
@pytest.fixture()
def posts():
return [make_post() for _ in range(3)]
<|fim▁end|> | return Keyword(keyword=fake.domain_word()) |
<|file_name|>conftest.py<|end_file_name|><|fim▁begin|>import pytest
from tests.base import Author, Post, Comment, Keyword, fake
def make_author():
return Author(
id=fake.random_int(),
first_name=fake.first_name(),
last_name=fake.last_name(),
twitter=fake.domain_word(),
)
def make_post(with_comments=True, with_author=True, with_keywords=True):
comments = [make_comment() for _ in range(2)] if with_comments else []
keywords = [make_keyword() for _ in range(3)] if with_keywords else []
author = make_author() if with_author else None
return Post(
id=fake.random_int(),
title=fake.catch_phrase(),
author=author,
author_id=author.id if with_author else None,
comments=comments,
keywords=keywords,
)
def make_comment(with_author=True):
author = make_author() if with_author else None
return Comment(id=fake.random_int(), body=fake.bs(), author=author)
def make_keyword():
return Keyword(keyword=fake.domain_word())
@pytest.fixture()
def author():
<|fim_middle|>
@pytest.fixture()
def authors():
return [make_author() for _ in range(3)]
@pytest.fixture()
def comments():
return [make_comment() for _ in range(3)]
@pytest.fixture()
def post():
return make_post()
@pytest.fixture()
def post_with_null_comment():
return make_post(with_comments=False)
@pytest.fixture()
def post_with_null_author():
return make_post(with_author=False)
@pytest.fixture()
def posts():
return [make_post() for _ in range(3)]
<|fim▁end|> | return make_author() |
<|file_name|>conftest.py<|end_file_name|><|fim▁begin|>import pytest
from tests.base import Author, Post, Comment, Keyword, fake
def make_author():
return Author(
id=fake.random_int(),
first_name=fake.first_name(),
last_name=fake.last_name(),
twitter=fake.domain_word(),
)
def make_post(with_comments=True, with_author=True, with_keywords=True):
comments = [make_comment() for _ in range(2)] if with_comments else []
keywords = [make_keyword() for _ in range(3)] if with_keywords else []
author = make_author() if with_author else None
return Post(
id=fake.random_int(),
title=fake.catch_phrase(),
author=author,
author_id=author.id if with_author else None,
comments=comments,
keywords=keywords,
)
def make_comment(with_author=True):
author = make_author() if with_author else None
return Comment(id=fake.random_int(), body=fake.bs(), author=author)
def make_keyword():
return Keyword(keyword=fake.domain_word())
@pytest.fixture()
def author():
return make_author()
@pytest.fixture()
def authors():
<|fim_middle|>
@pytest.fixture()
def comments():
return [make_comment() for _ in range(3)]
@pytest.fixture()
def post():
return make_post()
@pytest.fixture()
def post_with_null_comment():
return make_post(with_comments=False)
@pytest.fixture()
def post_with_null_author():
return make_post(with_author=False)
@pytest.fixture()
def posts():
return [make_post() for _ in range(3)]
<|fim▁end|> | return [make_author() for _ in range(3)] |
<|file_name|>conftest.py<|end_file_name|><|fim▁begin|>import pytest
from tests.base import Author, Post, Comment, Keyword, fake
def make_author():
return Author(
id=fake.random_int(),
first_name=fake.first_name(),
last_name=fake.last_name(),
twitter=fake.domain_word(),
)
def make_post(with_comments=True, with_author=True, with_keywords=True):
comments = [make_comment() for _ in range(2)] if with_comments else []
keywords = [make_keyword() for _ in range(3)] if with_keywords else []
author = make_author() if with_author else None
return Post(
id=fake.random_int(),
title=fake.catch_phrase(),
author=author,
author_id=author.id if with_author else None,
comments=comments,
keywords=keywords,
)
def make_comment(with_author=True):
author = make_author() if with_author else None
return Comment(id=fake.random_int(), body=fake.bs(), author=author)
def make_keyword():
return Keyword(keyword=fake.domain_word())
@pytest.fixture()
def author():
return make_author()
@pytest.fixture()
def authors():
return [make_author() for _ in range(3)]
@pytest.fixture()
def comments():
<|fim_middle|>
@pytest.fixture()
def post():
return make_post()
@pytest.fixture()
def post_with_null_comment():
return make_post(with_comments=False)
@pytest.fixture()
def post_with_null_author():
return make_post(with_author=False)
@pytest.fixture()
def posts():
return [make_post() for _ in range(3)]
<|fim▁end|> | return [make_comment() for _ in range(3)] |
<|file_name|>conftest.py<|end_file_name|><|fim▁begin|>import pytest
from tests.base import Author, Post, Comment, Keyword, fake
def make_author():
return Author(
id=fake.random_int(),
first_name=fake.first_name(),
last_name=fake.last_name(),
twitter=fake.domain_word(),
)
def make_post(with_comments=True, with_author=True, with_keywords=True):
comments = [make_comment() for _ in range(2)] if with_comments else []
keywords = [make_keyword() for _ in range(3)] if with_keywords else []
author = make_author() if with_author else None
return Post(
id=fake.random_int(),
title=fake.catch_phrase(),
author=author,
author_id=author.id if with_author else None,
comments=comments,
keywords=keywords,
)
def make_comment(with_author=True):
author = make_author() if with_author else None
return Comment(id=fake.random_int(), body=fake.bs(), author=author)
def make_keyword():
return Keyword(keyword=fake.domain_word())
@pytest.fixture()
def author():
return make_author()
@pytest.fixture()
def authors():
return [make_author() for _ in range(3)]
@pytest.fixture()
def comments():
return [make_comment() for _ in range(3)]
@pytest.fixture()
def post():
<|fim_middle|>
@pytest.fixture()
def post_with_null_comment():
return make_post(with_comments=False)
@pytest.fixture()
def post_with_null_author():
return make_post(with_author=False)
@pytest.fixture()
def posts():
return [make_post() for _ in range(3)]
<|fim▁end|> | return make_post() |
<|file_name|>conftest.py<|end_file_name|><|fim▁begin|>import pytest
from tests.base import Author, Post, Comment, Keyword, fake
def make_author():
return Author(
id=fake.random_int(),
first_name=fake.first_name(),
last_name=fake.last_name(),
twitter=fake.domain_word(),
)
def make_post(with_comments=True, with_author=True, with_keywords=True):
comments = [make_comment() for _ in range(2)] if with_comments else []
keywords = [make_keyword() for _ in range(3)] if with_keywords else []
author = make_author() if with_author else None
return Post(
id=fake.random_int(),
title=fake.catch_phrase(),
author=author,
author_id=author.id if with_author else None,
comments=comments,
keywords=keywords,
)
def make_comment(with_author=True):
author = make_author() if with_author else None
return Comment(id=fake.random_int(), body=fake.bs(), author=author)
def make_keyword():
return Keyword(keyword=fake.domain_word())
@pytest.fixture()
def author():
return make_author()
@pytest.fixture()
def authors():
return [make_author() for _ in range(3)]
@pytest.fixture()
def comments():
return [make_comment() for _ in range(3)]
@pytest.fixture()
def post():
return make_post()
@pytest.fixture()
def post_with_null_comment():
<|fim_middle|>
@pytest.fixture()
def post_with_null_author():
return make_post(with_author=False)
@pytest.fixture()
def posts():
return [make_post() for _ in range(3)]
<|fim▁end|> | return make_post(with_comments=False) |
<|file_name|>conftest.py<|end_file_name|><|fim▁begin|>import pytest
from tests.base import Author, Post, Comment, Keyword, fake
def make_author():
return Author(
id=fake.random_int(),
first_name=fake.first_name(),
last_name=fake.last_name(),
twitter=fake.domain_word(),
)
def make_post(with_comments=True, with_author=True, with_keywords=True):
comments = [make_comment() for _ in range(2)] if with_comments else []
keywords = [make_keyword() for _ in range(3)] if with_keywords else []
author = make_author() if with_author else None
return Post(
id=fake.random_int(),
title=fake.catch_phrase(),
author=author,
author_id=author.id if with_author else None,
comments=comments,
keywords=keywords,
)
def make_comment(with_author=True):
author = make_author() if with_author else None
return Comment(id=fake.random_int(), body=fake.bs(), author=author)
def make_keyword():
return Keyword(keyword=fake.domain_word())
@pytest.fixture()
def author():
return make_author()
@pytest.fixture()
def authors():
return [make_author() for _ in range(3)]
@pytest.fixture()
def comments():
return [make_comment() for _ in range(3)]
@pytest.fixture()
def post():
return make_post()
@pytest.fixture()
def post_with_null_comment():
return make_post(with_comments=False)
@pytest.fixture()
def post_with_null_author():
<|fim_middle|>
@pytest.fixture()
def posts():
return [make_post() for _ in range(3)]
<|fim▁end|> | return make_post(with_author=False) |