| index (int64, 0–100k) | blob_id (string, length 40) | code (string, 7–7.27M chars) | steps (list, 1–1.25k items) | error (bool, 2 classes) |
|---|---|---|---|---|
98,600 | bd4bfd2045243258a2936d602e25e747bd5817ce |
from xai.brain.wordbase.nouns._quiver import _QUIVER
#calss header
class _QUIVERED(_QUIVER, ):
    def __init__(self,):
        _QUIVER.__init__(self)
        self.name = "QUIVERED"
        self.specie = 'nouns'
        self.basic = "quiver"
        self.jsondata = {}
|
[
"\n\nfrom xai.brain.wordbase.nouns._quiver import _QUIVER\n\n#calss header\nclass _QUIVERED(_QUIVER, ):\n\tdef __init__(self,): \n\t\t_QUIVER.__init__(self)\n\t\tself.name = \"QUIVERED\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"quiver\"\n\t\tself.jsondata = {}\n",
"from xai.brain.wordbase.nouns._quiver import _QUIVER\n\n\nclass _QUIVERED(_QUIVER):\n\n def __init__(self):\n _QUIVER.__init__(self)\n self.name = 'QUIVERED'\n self.specie = 'nouns'\n self.basic = 'quiver'\n self.jsondata = {}\n",
"<import token>\n\n\nclass _QUIVERED(_QUIVER):\n\n def __init__(self):\n _QUIVER.__init__(self)\n self.name = 'QUIVERED'\n self.specie = 'nouns'\n self.basic = 'quiver'\n self.jsondata = {}\n",
"<import token>\n\n\nclass _QUIVERED(_QUIVER):\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
98,601 | fe0b06af156219034a08468191e566d9c35a2d6d |
from idc import *
base = 0x4000000
seg_size = 0x400000
address = base
add_segm_ex(base, base + seg_size, 0X1, 2, 1, 2, ADDSEG_NOSREG)
set_segm_name(base, "patch")
set_segm_class(base, "CODE")
set_segm_type(base, 2)
patch_byte(0x4000000, 0xe0)
patch_byte(0x4000001, 0x3)
patch_byte(0x4000002, 0x14)
patch_byte(0x4000003, 0x2a)
patch_byte(0x4000004, 0xf3)
patch_byte(0x4000005, 0x3)
patch_byte(0x4000006, 0x7)
patch_byte(0x4000007, 0x2a)
patch_byte(0x4000008, 0x7f)
patch_byte(0x4000009, 0x2)
patch_byte(0x400000a, 0xa)
patch_byte(0x400000b, 0x6b)
patch_byte(0x400000c, 0x7f)
patch_byte(0x400000d, 0x2)
patch_byte(0x400000e, 0xb)
patch_byte(0x400000f, 0x6b)
patch_byte(0x4000010, 0x7f)
patch_byte(0x4000011, 0x2)
patch_byte(0x4000012, 0x8)
patch_byte(0x4000013, 0x6b)
patch_byte(0x4000014, 0xe7)
patch_byte(0x4000015, 0x3)
patch_byte(0x4000016, 0x13)
patch_byte(0x4000017, 0x2a)
patch_byte(0x4000018, 0xf4)
patch_byte(0x4000019, 0x3)
patch_byte(0x400001a, 0x0)
patch_byte(0x400001b, 0x2a)
patch_byte(0x400001c, 0xed)
patch_byte(0x400001d, 0x1)
patch_byte(0x400001e, 0x0)
patch_byte(0x400001f, 0x17)
patch_byte(0x754, 0x2b)
patch_byte(0x755, 0xfe)
patch_byte(0x756, 0xff)
patch_byte(0x757, 0x14)
patch_byte(0x4000020, 0x47)
patch_byte(0x4000021, 0x0)
patch_byte(0x4000022, 0x0)
patch_byte(0x4000023, 0x35)
patch_byte(0x4000024, 0xa7)
patch_byte(0x4000025, 0x1)
patch_byte(0x4000026, 0x0)
patch_byte(0x4000027, 0x34)
patch_byte(0x4000028, 0xe0)
patch_byte(0x4000029, 0x3)
patch_byte(0x400002a, 0x14)
patch_byte(0x400002b, 0x2a)
patch_byte(0x400002c, 0xf3)
patch_byte(0x400002d, 0x3)
patch_byte(0x400002e, 0x7)
patch_byte(0x400002f, 0x2a)
patch_byte(0x4000030, 0x7f)
patch_byte(0x4000031, 0x2)
patch_byte(0x4000032, 0xa)
patch_byte(0x4000033, 0x6b)
patch_byte(0x4000034, 0x7f)
patch_byte(0x4000035, 0x2)
patch_byte(0x4000036, 0xb)
patch_byte(0x4000037, 0x6b)
patch_byte(0x4000038, 0x7f)
patch_byte(0x4000039, 0x2)
patch_byte(0x400003a, 0xc)
patch_byte(0x400003b, 0x6b)
patch_byte(0x400003c, 0x7f)
patch_byte(0x400003d, 0x2)
patch_byte(0x400003e, 0x10)
patch_byte(0x400003f, 0x6b)
patch_byte(0x4000040, 0x7f)
patch_byte(0x4000041, 0x2)
patch_byte(0x4000042, 0x12)
patch_byte(0x4000043, 0x6b)
patch_byte(0x4000044, 0x7f)
patch_byte(0x4000045, 0x2)
patch_byte(0x4000046, 0x2)
patch_byte(0x4000047, 0x6b)
patch_byte(0x4000048, 0x87)
patch_byte(0x4000049, 0x81)
patch_byte(0x400004a, 0xba)
patch_byte(0x400004b, 0x52)
patch_byte(0x400004c, 0x87)
patch_byte(0x400004d, 0xc)
patch_byte(0x400004e, 0x8f)
patch_byte(0x400004f, 0x72)
patch_byte(0x4000050, 0xf4)
patch_byte(0x4000051, 0x3)
patch_byte(0x4000052, 0x1)
patch_byte(0x4000053, 0x2a)
patch_byte(0x4000054, 0xf5)
patch_byte(0x4000055, 0x1)
patch_byte(0x4000056, 0x0)
patch_byte(0x4000057, 0x17)
patch_byte(0x4000058, 0xe0)
patch_byte(0x4000059, 0x3)
patch_byte(0x400005a, 0x14)
patch_byte(0x400005b, 0x2a)
patch_byte(0x400005c, 0xf3)
patch_byte(0x400005d, 0x3)
patch_byte(0x400005e, 0x7)
patch_byte(0x400005f, 0x2a)
patch_byte(0x4000060, 0x7f)
patch_byte(0x4000061, 0x2)
patch_byte(0x4000062, 0xa)
patch_byte(0x4000063, 0x6b)
patch_byte(0x4000064, 0x7f)
patch_byte(0x4000065, 0x2)
patch_byte(0x4000066, 0xb)
patch_byte(0x4000067, 0x6b)
patch_byte(0x4000068, 0x7f)
patch_byte(0x4000069, 0x2)
patch_byte(0x400006a, 0xc)
patch_byte(0x400006b, 0x6b)
patch_byte(0x400006c, 0x7f)
patch_byte(0x400006d, 0x2)
patch_byte(0x400006e, 0x10)
patch_byte(0x400006f, 0x6b)
patch_byte(0x4000070, 0x7f)
patch_byte(0x4000071, 0x2)
patch_byte(0x4000072, 0x11)
patch_byte(0x4000073, 0x6b)
patch_byte(0x4000074, 0xe7)
patch_byte(0x4000075, 0x3)
patch_byte(0x4000076, 0x13)
patch_byte(0x4000077, 0x2a)
patch_byte(0x4000078, 0xf4)
patch_byte(0x4000079, 0x3)
patch_byte(0x400007a, 0x0)
patch_byte(0x400007b, 0x2a)
patch_byte(0x400007c, 0xe0)
patch_byte(0x400007d, 0x1)
patch_byte(0x400007e, 0x0)
patch_byte(0x400007f, 0x17)
patch_byte(0x760, 0xe7)
patch_byte(0x761, 0x17)
patch_byte(0x762, 0x9f)
patch_byte(0x763, 0x1a)
patch_byte(0x764, 0x2f)
patch_byte(0x765, 0xfe)
patch_byte(0x766, 0xff)
patch_byte(0x767, 0x14)
patch_byte(0x4000080, 0x47)
patch_byte(0x4000081, 0x0)
patch_byte(0x4000082, 0x0)
patch_byte(0x4000083, 0x35)
patch_byte(0x4000084, 0x27)
patch_byte(0x4000085, 0x1)
patch_byte(0x4000086, 0x0)
patch_byte(0x4000087, 0x34)
patch_byte(0x4000088, 0xe0)
patch_byte(0x4000089, 0x3)
patch_byte(0x400008a, 0x14)
patch_byte(0x400008b, 0x2a)
patch_byte(0x400008c, 0xf3)
patch_byte(0x400008d, 0x3)
patch_byte(0x400008e, 0x7)
patch_byte(0x400008f, 0x2a)
patch_byte(0x4000090, 0x7f)
patch_byte(0x4000091, 0x2)
patch_byte(0x4000092, 0xa)
patch_byte(0x4000093, 0x6b)
patch_byte(0x4000094, 0x7f)
patch_byte(0x4000095, 0x2)
patch_byte(0x4000096, 0xb)
patch_byte(0x4000097, 0x6b)
patch_byte(0x4000098, 0x7f)
patch_byte(0x4000099, 0x2)
patch_byte(0x400009a, 0xc)
patch_byte(0x400009b, 0x6b)
patch_byte(0x400009c, 0x7f)
patch_byte(0x400009d, 0x2)
patch_byte(0x400009e, 0x10)
patch_byte(0x400009f, 0x6b)
patch_byte(0x40000a0, 0x7f)
patch_byte(0x40000a1, 0x2)
patch_byte(0x40000a2, 0x12)
patch_byte(0x40000a3, 0x6b)
patch_byte(0x40000a4, 0xdc)
patch_byte(0x40000a5, 0x1)
patch_byte(0x40000a6, 0x0)
patch_byte(0x40000a7, 0x17)
patch_byte(0x40000a8, 0xe0)
patch_byte(0x40000a9, 0x3)
patch_byte(0x40000aa, 0x14)
patch_byte(0x40000ab, 0x2a)
patch_byte(0x40000ac, 0xf3)
patch_byte(0x40000ad, 0x3)
patch_byte(0x40000ae, 0x7)
patch_byte(0x40000af, 0x2a)
patch_byte(0x40000b0, 0x7f)
patch_byte(0x40000b1, 0x2)
patch_byte(0x40000b2, 0xa)
patch_byte(0x40000b3, 0x6b)
patch_byte(0x40000b4, 0x7f)
patch_byte(0x40000b5, 0x2)
patch_byte(0x40000b6, 0xb)
patch_byte(0x40000b7, 0x6b)
patch_byte(0x40000b8, 0x7f)
patch_byte(0x40000b9, 0x2)
patch_byte(0x40000ba, 0xc)
patch_byte(0x40000bb, 0x6b)
patch_byte(0x40000bc, 0x7f)
patch_byte(0x40000bd, 0x2)
patch_byte(0x40000be, 0xd)
patch_byte(0x40000bf, 0x6b)
patch_byte(0x40000c0, 0xe7)
patch_byte(0x40000c1, 0x3)
patch_byte(0x40000c2, 0x13)
patch_byte(0x40000c3, 0x2a)
patch_byte(0x40000c4, 0xf4)
patch_byte(0x40000c5, 0x3)
patch_byte(0x40000c6, 0x0)
patch_byte(0x40000c7, 0x2a)
patch_byte(0x40000c8, 0xb4)
patch_byte(0x40000c9, 0x1)
patch_byte(0x40000ca, 0x0)
patch_byte(0x40000cb, 0x17)
patch_byte(0x7d8, 0xe7)
patch_byte(0x7d9, 0x17)
patch_byte(0x7da, 0x9f)
patch_byte(0x7db, 0x1a)
patch_byte(0x7e0, 0x28)
patch_byte(0x7e1, 0xfe)
patch_byte(0x7e2, 0xff)
patch_byte(0x7e3, 0x14)
patch_byte(0x40000cc, 0xe0)
patch_byte(0x40000cd, 0x3)
patch_byte(0x40000ce, 0x14)
patch_byte(0x40000cf, 0x2a)
patch_byte(0x40000d0, 0xf3)
patch_byte(0x40000d1, 0x3)
patch_byte(0x40000d2, 0x7)
patch_byte(0x40000d3, 0x2a)
patch_byte(0x40000d4, 0x7f)
patch_byte(0x40000d5, 0x2)
patch_byte(0x40000d6, 0xa)
patch_byte(0x40000d7, 0x6b)
patch_byte(0x40000d8, 0x7f)
patch_byte(0x40000d9, 0x2)
patch_byte(0x40000da, 0xb)
patch_byte(0x40000db, 0x6b)
patch_byte(0x40000dc, 0x7f)
patch_byte(0x40000dd, 0x2)
patch_byte(0x40000de, 0xc)
patch_byte(0x40000df, 0x6b)
patch_byte(0x40000e0, 0x7f)
patch_byte(0x40000e1, 0x2)
patch_byte(0x40000e2, 0x10)
patch_byte(0x40000e3, 0x6b)
patch_byte(0x40000e4, 0x7f)
patch_byte(0x40000e5, 0x2)
patch_byte(0x40000e6, 0x12)
patch_byte(0x40000e7, 0x6b)
patch_byte(0x40000e8, 0x7f)
patch_byte(0x40000e9, 0x2)
patch_byte(0x40000ea, 0x2)
patch_byte(0x40000eb, 0x6b)
patch_byte(0x40000ec, 0x87)
patch_byte(0x40000ed, 0x81)
patch_byte(0x40000ee, 0xba)
patch_byte(0x40000ef, 0x52)
patch_byte(0x40000f0, 0x87)
patch_byte(0x40000f1, 0xc)
patch_byte(0x40000f2, 0x8f)
patch_byte(0x40000f3, 0x72)
patch_byte(0x40000f4, 0xf4)
patch_byte(0x40000f5, 0x3)
patch_byte(0x40000f6, 0x1)
patch_byte(0x40000f7, 0x2a)
patch_byte(0x40000f8, 0x7f)
patch_byte(0x40000f9, 0x2)
patch_byte(0x40000fa, 0x4)
patch_byte(0x40000fb, 0x6b)
patch_byte(0x40000fc, 0x87)
patch_byte(0x40000fd, 0x81)
patch_byte(0x40000fe, 0xba)
patch_byte(0x40000ff, 0x52)
patch_byte(0x4000100, 0x87)
patch_byte(0x4000101, 0xc)
patch_byte(0x4000102, 0x8f)
patch_byte(0x4000103, 0x72)
patch_byte(0x4000104, 0xf4)
patch_byte(0x4000105, 0x3)
patch_byte(0x4000106, 0x5)
patch_byte(0x4000107, 0x2a)
patch_byte(0x4000108, 0x7f)
patch_byte(0x4000109, 0x2)
patch_byte(0x400010a, 0x9)
patch_byte(0x400010b, 0x6b)
patch_byte(0x400010c, 0xe7)
patch_byte(0x400010d, 0x3)
patch_byte(0x400010e, 0x13)
patch_byte(0x400010f, 0x2a)
patch_byte(0x4000110, 0xf4)
patch_byte(0x4000111, 0x3)
patch_byte(0x4000112, 0x0)
patch_byte(0x4000113, 0x2a)
patch_byte(0x4000114, 0xce)
patch_byte(0x4000115, 0x1)
patch_byte(0x4000116, 0x0)
patch_byte(0x4000117, 0x17)
patch_byte(0x824, 0x2a)
patch_byte(0x825, 0xfe)
patch_byte(0x826, 0xff)
patch_byte(0x827, 0x14)
patch_byte(0x4000118, 0xe0)
patch_byte(0x4000119, 0x3)
patch_byte(0x400011a, 0x14)
patch_byte(0x400011b, 0x2a)
patch_byte(0x400011c, 0xf3)
patch_byte(0x400011d, 0x3)
patch_byte(0x400011e, 0x7)
patch_byte(0x400011f, 0x2a)
patch_byte(0x4000120, 0x7f)
patch_byte(0x4000121, 0x2)
patch_byte(0x4000122, 0xa)
patch_byte(0x4000123, 0x6b)
patch_byte(0x4000124, 0x7f)
patch_byte(0x4000125, 0x2)
patch_byte(0x4000126, 0xb)
patch_byte(0x4000127, 0x6b)
patch_byte(0x4000128, 0x7f)
patch_byte(0x4000129, 0x2)
patch_byte(0x400012a, 0xc)
patch_byte(0x400012b, 0x6b)
patch_byte(0x400012c, 0x7f)
patch_byte(0x400012d, 0x2)
patch_byte(0x400012e, 0x10)
patch_byte(0x400012f, 0x6b)
patch_byte(0x4000130, 0x7f)
patch_byte(0x4000131, 0x2)
patch_byte(0x4000132, 0x12)
patch_byte(0x4000133, 0x6b)
patch_byte(0x4000134, 0x7f)
patch_byte(0x4000135, 0x2)
patch_byte(0x4000136, 0x2)
patch_byte(0x4000137, 0x6b)
patch_byte(0x4000138, 0x87)
patch_byte(0x4000139, 0x81)
patch_byte(0x400013a, 0xba)
patch_byte(0x400013b, 0x52)
patch_byte(0x400013c, 0x87)
patch_byte(0x400013d, 0xc)
patch_byte(0x400013e, 0x8f)
patch_byte(0x400013f, 0x72)
patch_byte(0x4000140, 0xf4)
patch_byte(0x4000141, 0x3)
patch_byte(0x4000142, 0x1)
patch_byte(0x4000143, 0x2a)
patch_byte(0x4000144, 0x7f)
patch_byte(0x4000145, 0x2)
patch_byte(0x4000146, 0x4)
patch_byte(0x4000147, 0x6b)
patch_byte(0x4000148, 0x87)
patch_byte(0x4000149, 0x81)
patch_byte(0x400014a, 0xba)
patch_byte(0x400014b, 0x52)
patch_byte(0x400014c, 0x87)
patch_byte(0x400014d, 0xc)
patch_byte(0x400014e, 0x8f)
patch_byte(0x400014f, 0x72)
patch_byte(0x4000150, 0xf4)
patch_byte(0x4000151, 0x3)
patch_byte(0x4000152, 0x5)
patch_byte(0x4000153, 0x2a)
patch_byte(0x4000154, 0x7f)
patch_byte(0x4000155, 0x2)
patch_byte(0x4000156, 0x9)
patch_byte(0x4000157, 0x6b)
patch_byte(0x4000158, 0xe7)
patch_byte(0x4000159, 0x3)
patch_byte(0x400015a, 0x13)
patch_byte(0x400015b, 0x2a)
patch_byte(0x400015c, 0xf4)
patch_byte(0x400015d, 0x3)
patch_byte(0x400015e, 0x0)
patch_byte(0x400015f, 0x2a)
patch_byte(0x4000160, 0xbb)
patch_byte(0x4000161, 0x1)
patch_byte(0x4000162, 0x0)
patch_byte(0x4000163, 0x17)
patch_byte(0x808, 0x44)
patch_byte(0x809, 0xfe)
patch_byte(0x80a, 0xff)
patch_byte(0x80b, 0x14)
patch_byte(0x4000164, 0x47)
patch_byte(0x4000165, 0x0)
patch_byte(0x4000166, 0x0)
patch_byte(0x4000167, 0x35)
patch_byte(0x4000168, 0x87)
patch_byte(0x4000169, 0x1)
patch_byte(0x400016a, 0x0)
patch_byte(0x400016b, 0x34)
patch_byte(0x400016c, 0xe0)
patch_byte(0x400016d, 0x3)
patch_byte(0x400016e, 0x14)
patch_byte(0x400016f, 0x2a)
patch_byte(0x4000170, 0xf3)
patch_byte(0x4000171, 0x3)
patch_byte(0x4000172, 0x7)
patch_byte(0x4000173, 0x2a)
patch_byte(0x4000174, 0x7f)
patch_byte(0x4000175, 0x2)
patch_byte(0x4000176, 0xa)
patch_byte(0x4000177, 0x6b)
patch_byte(0x4000178, 0x7f)
patch_byte(0x4000179, 0x2)
patch_byte(0x400017a, 0xb)
patch_byte(0x400017b, 0x6b)
patch_byte(0x400017c, 0x7f)
patch_byte(0x400017d, 0x2)
patch_byte(0x400017e, 0xc)
patch_byte(0x400017f, 0x6b)
patch_byte(0x4000180, 0x7f)
patch_byte(0x4000181, 0x2)
patch_byte(0x4000182, 0x10)
patch_byte(0x4000183, 0x6b)
patch_byte(0x4000184, 0x7f)
patch_byte(0x4000185, 0x2)
patch_byte(0x4000186, 0x12)
patch_byte(0x4000187, 0x6b)
patch_byte(0x4000188, 0x7f)
patch_byte(0x4000189, 0x2)
patch_byte(0x400018a, 0xf)
patch_byte(0x400018b, 0x6b)
patch_byte(0x400018c, 0xe7)
patch_byte(0x400018d, 0x3)
patch_byte(0x400018e, 0x13)
patch_byte(0x400018f, 0x2a)
patch_byte(0x4000190, 0xf4)
patch_byte(0x4000191, 0x3)
patch_byte(0x4000192, 0x0)
patch_byte(0x4000193, 0x2a)
patch_byte(0x4000194, 0xb3)
patch_byte(0x4000195, 0x1)
patch_byte(0x4000196, 0x0)
patch_byte(0x4000197, 0x17)
patch_byte(0x4000198, 0xe0)
patch_byte(0x4000199, 0x3)
patch_byte(0x400019a, 0x14)
patch_byte(0x400019b, 0x2a)
patch_byte(0x400019c, 0xf3)
patch_byte(0x400019d, 0x3)
patch_byte(0x400019e, 0x7)
patch_byte(0x400019f, 0x2a)
patch_byte(0x40001a0, 0x7f)
patch_byte(0x40001a1, 0x2)
patch_byte(0x40001a2, 0xa)
patch_byte(0x40001a3, 0x6b)
patch_byte(0x40001a4, 0x7f)
patch_byte(0x40001a5, 0x2)
patch_byte(0x40001a6, 0xe)
patch_byte(0x40001a7, 0x6b)
patch_byte(0x40001a8, 0xe7)
patch_byte(0x40001a9, 0x3)
patch_byte(0x40001aa, 0x13)
patch_byte(0x40001ab, 0x2a)
patch_byte(0x40001ac, 0xf4)
patch_byte(0x40001ad, 0x3)
patch_byte(0x40001ae, 0x0)
patch_byte(0x40001af, 0x2a)
patch_byte(0x40001b0, 0x83)
patch_byte(0x40001b1, 0x1)
patch_byte(0x40001b2, 0x0)
patch_byte(0x40001b3, 0x17)
patch_byte(0x7a0, 0xe7)
patch_byte(0x7a1, 0x17)
patch_byte(0x7a2, 0x9f)
patch_byte(0x7a3, 0x1a)
patch_byte(0x7a8, 0x6f)
patch_byte(0x7a9, 0xfe)
patch_byte(0x7aa, 0xff)
patch_byte(0x7ab, 0x14)
patch_byte(0x40001b4, 0xe0)
patch_byte(0x40001b5, 0x3)
patch_byte(0x40001b6, 0x14)
patch_byte(0x40001b7, 0x2a)
patch_byte(0x40001b8, 0xf3)
patch_byte(0x40001b9, 0x3)
patch_byte(0x40001ba, 0x7)
patch_byte(0x40001bb, 0x2a)
patch_byte(0x40001bc, 0x7f)
patch_byte(0x40001bd, 0x2)
patch_byte(0x40001be, 0xa)
patch_byte(0x40001bf, 0x6b)
patch_byte(0x40001c0, 0x7f)
patch_byte(0x40001c1, 0x2)
patch_byte(0x40001c2, 0xb)
patch_byte(0x40001c3, 0x6b)
patch_byte(0x40001c4, 0x7f)
patch_byte(0x40001c5, 0x2)
patch_byte(0x40001c6, 0xc)
patch_byte(0x40001c7, 0x6b)
patch_byte(0x40001c8, 0x7f)
patch_byte(0x40001c9, 0x2)
patch_byte(0x40001ca, 0x10)
patch_byte(0x40001cb, 0x6b)
patch_byte(0x40001cc, 0x7f)
patch_byte(0x40001cd, 0x2)
patch_byte(0x40001ce, 0x12)
patch_byte(0x40001cf, 0x6b)
patch_byte(0x40001d0, 0x7f)
patch_byte(0x40001d1, 0x2)
patch_byte(0x40001d2, 0x2)
patch_byte(0x40001d3, 0x6b)
patch_byte(0x40001d4, 0x87)
patch_byte(0x40001d5, 0x81)
patch_byte(0x40001d6, 0xba)
patch_byte(0x40001d7, 0x52)
patch_byte(0x40001d8, 0x87)
patch_byte(0x40001d9, 0xc)
patch_byte(0x40001da, 0x8f)
patch_byte(0x40001db, 0x72)
patch_byte(0x40001dc, 0xf4)
patch_byte(0x40001dd, 0x3)
patch_byte(0x40001de, 0x1)
patch_byte(0x40001df, 0x2a)
patch_byte(0x40001e0, 0x7f)
patch_byte(0x40001e1, 0x2)
patch_byte(0x40001e2, 0x4)
patch_byte(0x40001e3, 0x6b)
patch_byte(0x40001e4, 0x87)
patch_byte(0x40001e5, 0x81)
patch_byte(0x40001e6, 0xba)
patch_byte(0x40001e7, 0x52)
patch_byte(0x40001e8, 0x87)
patch_byte(0x40001e9, 0xc)
patch_byte(0x40001ea, 0x8f)
patch_byte(0x40001eb, 0x72)
patch_byte(0x40001ec, 0xf4)
patch_byte(0x40001ed, 0x3)
patch_byte(0x40001ee, 0x5)
patch_byte(0x40001ef, 0x2a)
patch_byte(0x40001f0, 0x7f)
patch_byte(0x40001f1, 0x2)
patch_byte(0x40001f2, 0x9)
patch_byte(0x40001f3, 0x6b)
patch_byte(0x40001f4, 0xe7)
patch_byte(0x40001f5, 0x3)
patch_byte(0x40001f6, 0x13)
patch_byte(0x40001f7, 0x2a)
patch_byte(0x40001f8, 0xf4)
patch_byte(0x40001f9, 0x3)
patch_byte(0x40001fa, 0x0)
patch_byte(0x40001fb, 0x2a)
patch_byte(0x40001fc, 0x94)
patch_byte(0x40001fd, 0x1)
patch_byte(0x40001fe, 0x0)
patch_byte(0x40001ff, 0x17)
patch_byte(0x86c, 0x52)
patch_byte(0x86d, 0xfe)
patch_byte(0x86e, 0xff)
patch_byte(0x86f, 0x14)
patch_byte(0x4000200, 0xe0)
patch_byte(0x4000201, 0x3)
patch_byte(0x4000202, 0x14)
patch_byte(0x4000203, 0x2a)
patch_byte(0x4000204, 0xf3)
patch_byte(0x4000205, 0x3)
patch_byte(0x4000206, 0x7)
patch_byte(0x4000207, 0x2a)
patch_byte(0x4000208, 0x7f)
patch_byte(0x4000209, 0x2)
patch_byte(0x400020a, 0xa)
patch_byte(0x400020b, 0x6b)
patch_byte(0x400020c, 0x7f)
patch_byte(0x400020d, 0x2)
patch_byte(0x400020e, 0xb)
patch_byte(0x400020f, 0x6b)
patch_byte(0x4000210, 0x7f)
patch_byte(0x4000211, 0x2)
patch_byte(0x4000212, 0xc)
patch_byte(0x4000213, 0x6b)
patch_byte(0x4000214, 0x7f)
patch_byte(0x4000215, 0x2)
patch_byte(0x4000216, 0x10)
patch_byte(0x4000217, 0x6b)
patch_byte(0x4000218, 0x7f)
patch_byte(0x4000219, 0x2)
patch_byte(0x400021a, 0x12)
patch_byte(0x400021b, 0x6b)
patch_byte(0x400021c, 0x7f)
patch_byte(0x400021d, 0x2)
patch_byte(0x400021e, 0x2)
patch_byte(0x400021f, 0x6b)
patch_byte(0x4000220, 0x87)
patch_byte(0x4000221, 0x81)
patch_byte(0x4000222, 0xba)
patch_byte(0x4000223, 0x52)
patch_byte(0x4000224, 0x87)
patch_byte(0x4000225, 0xc)
patch_byte(0x4000226, 0x8f)
patch_byte(0x4000227, 0x72)
patch_byte(0x4000228, 0xf4)
patch_byte(0x4000229, 0x3)
patch_byte(0x400022a, 0x1)
patch_byte(0x400022b, 0x2a)
patch_byte(0x400022c, 0x7f)
patch_byte(0x400022d, 0x2)
patch_byte(0x400022e, 0x4)
patch_byte(0x400022f, 0x6b)
patch_byte(0x4000230, 0x87)
patch_byte(0x4000231, 0x81)
patch_byte(0x4000232, 0xba)
patch_byte(0x4000233, 0x52)
patch_byte(0x4000234, 0x87)
patch_byte(0x4000235, 0xc)
patch_byte(0x4000236, 0x8f)
patch_byte(0x4000237, 0x72)
patch_byte(0x4000238, 0xf4)
patch_byte(0x4000239, 0x3)
patch_byte(0x400023a, 0x5)
patch_byte(0x400023b, 0x2a)
patch_byte(0x400023c, 0x7f)
patch_byte(0x400023d, 0x2)
patch_byte(0x400023e, 0x9)
patch_byte(0x400023f, 0x6b)
patch_byte(0x4000240, 0xe7)
patch_byte(0x4000241, 0x3)
patch_byte(0x4000242, 0x13)
patch_byte(0x4000243, 0x2a)
patch_byte(0x4000244, 0xf4)
patch_byte(0x4000245, 0x3)
patch_byte(0x4000246, 0x0)
patch_byte(0x4000247, 0x2a)
patch_byte(0x4000248, 0x81)
patch_byte(0x4000249, 0x1)
patch_byte(0x400024a, 0x0)
patch_byte(0x400024b, 0x17)
patch_byte(0x838, 0x72)
patch_byte(0x839, 0xfe)
patch_byte(0x83a, 0xff)
patch_byte(0x83b, 0x14)
|
[
"\nfrom idc import *\nbase = 0x4000000\nseg_size = 0x400000\naddress = base\nadd_segm_ex(base, base + seg_size, 0X1, 2, 1, 2, ADDSEG_NOSREG)\nset_segm_name(base, \"patch\")\nset_segm_class(base, \"CODE\")\nset_segm_type(base, 2)\npatch_byte(0x4000000, 0xe0)\npatch_byte(0x4000001, 0x3)\npatch_byte(0x4000002, 0x14)\npatch_byte(0x4000003, 0x2a)\npatch_byte(0x4000004, 0xf3)\npatch_byte(0x4000005, 0x3)\npatch_byte(0x4000006, 0x7)\npatch_byte(0x4000007, 0x2a)\npatch_byte(0x4000008, 0x7f)\npatch_byte(0x4000009, 0x2)\npatch_byte(0x400000a, 0xa)\npatch_byte(0x400000b, 0x6b)\npatch_byte(0x400000c, 0x7f)\npatch_byte(0x400000d, 0x2)\npatch_byte(0x400000e, 0xb)\npatch_byte(0x400000f, 0x6b)\npatch_byte(0x4000010, 0x7f)\npatch_byte(0x4000011, 0x2)\npatch_byte(0x4000012, 0x8)\npatch_byte(0x4000013, 0x6b)\npatch_byte(0x4000014, 0xe7)\npatch_byte(0x4000015, 0x3)\npatch_byte(0x4000016, 0x13)\npatch_byte(0x4000017, 0x2a)\npatch_byte(0x4000018, 0xf4)\npatch_byte(0x4000019, 0x3)\npatch_byte(0x400001a, 0x0)\npatch_byte(0x400001b, 0x2a)\npatch_byte(0x400001c, 0xed)\npatch_byte(0x400001d, 0x1)\npatch_byte(0x400001e, 0x0)\npatch_byte(0x400001f, 0x17)\npatch_byte(0x754, 0x2b)\npatch_byte(0x755, 0xfe)\npatch_byte(0x756, 0xff)\npatch_byte(0x757, 0x14)\npatch_byte(0x4000020, 0x47)\npatch_byte(0x4000021, 0x0)\npatch_byte(0x4000022, 0x0)\npatch_byte(0x4000023, 0x35)\npatch_byte(0x4000024, 0xa7)\npatch_byte(0x4000025, 0x1)\npatch_byte(0x4000026, 0x0)\npatch_byte(0x4000027, 0x34)\npatch_byte(0x4000028, 0xe0)\npatch_byte(0x4000029, 0x3)\npatch_byte(0x400002a, 0x14)\npatch_byte(0x400002b, 0x2a)\npatch_byte(0x400002c, 0xf3)\npatch_byte(0x400002d, 0x3)\npatch_byte(0x400002e, 0x7)\npatch_byte(0x400002f, 0x2a)\npatch_byte(0x4000030, 0x7f)\npatch_byte(0x4000031, 0x2)\npatch_byte(0x4000032, 0xa)\npatch_byte(0x4000033, 0x6b)\npatch_byte(0x4000034, 0x7f)\npatch_byte(0x4000035, 0x2)\npatch_byte(0x4000036, 0xb)\npatch_byte(0x4000037, 0x6b)\npatch_byte(0x4000038, 0x7f)\npatch_byte(0x4000039, 0x2)\npatch_byte(0x400003a, 0xc)\npatch_byte(0x400003b, 0x6b)\npatch_byte(0x400003c, 0x7f)\npatch_byte(0x400003d, 0x2)\npatch_byte(0x400003e, 0x10)\npatch_byte(0x400003f, 0x6b)\npatch_byte(0x4000040, 0x7f)\npatch_byte(0x4000041, 0x2)\npatch_byte(0x4000042, 0x12)\npatch_byte(0x4000043, 0x6b)\npatch_byte(0x4000044, 0x7f)\npatch_byte(0x4000045, 0x2)\npatch_byte(0x4000046, 0x2)\npatch_byte(0x4000047, 0x6b)\npatch_byte(0x4000048, 0x87)\npatch_byte(0x4000049, 0x81)\npatch_byte(0x400004a, 0xba)\npatch_byte(0x400004b, 0x52)\npatch_byte(0x400004c, 0x87)\npatch_byte(0x400004d, 0xc)\npatch_byte(0x400004e, 0x8f)\npatch_byte(0x400004f, 0x72)\npatch_byte(0x4000050, 0xf4)\npatch_byte(0x4000051, 0x3)\npatch_byte(0x4000052, 0x1)\npatch_byte(0x4000053, 0x2a)\npatch_byte(0x4000054, 0xf5)\npatch_byte(0x4000055, 0x1)\npatch_byte(0x4000056, 0x0)\npatch_byte(0x4000057, 0x17)\npatch_byte(0x4000058, 0xe0)\npatch_byte(0x4000059, 0x3)\npatch_byte(0x400005a, 0x14)\npatch_byte(0x400005b, 0x2a)\npatch_byte(0x400005c, 0xf3)\npatch_byte(0x400005d, 0x3)\npatch_byte(0x400005e, 0x7)\npatch_byte(0x400005f, 0x2a)\npatch_byte(0x4000060, 0x7f)\npatch_byte(0x4000061, 0x2)\npatch_byte(0x4000062, 0xa)\npatch_byte(0x4000063, 0x6b)\npatch_byte(0x4000064, 0x7f)\npatch_byte(0x4000065, 0x2)\npatch_byte(0x4000066, 0xb)\npatch_byte(0x4000067, 0x6b)\npatch_byte(0x4000068, 0x7f)\npatch_byte(0x4000069, 0x2)\npatch_byte(0x400006a, 0xc)\npatch_byte(0x400006b, 0x6b)\npatch_byte(0x400006c, 0x7f)\npatch_byte(0x400006d, 0x2)\npatch_byte(0x400006e, 0x10)\npatch_byte(0x400006f, 0x6b)\npatch_byte(0x4000070, 
0x7f)\npatch_byte(0x4000071, 0x2)\npatch_byte(0x4000072, 0x11)\npatch_byte(0x4000073, 0x6b)\npatch_byte(0x4000074, 0xe7)\npatch_byte(0x4000075, 0x3)\npatch_byte(0x4000076, 0x13)\npatch_byte(0x4000077, 0x2a)\npatch_byte(0x4000078, 0xf4)\npatch_byte(0x4000079, 0x3)\npatch_byte(0x400007a, 0x0)\npatch_byte(0x400007b, 0x2a)\npatch_byte(0x400007c, 0xe0)\npatch_byte(0x400007d, 0x1)\npatch_byte(0x400007e, 0x0)\npatch_byte(0x400007f, 0x17)\npatch_byte(0x760, 0xe7)\npatch_byte(0x761, 0x17)\npatch_byte(0x762, 0x9f)\npatch_byte(0x763, 0x1a)\npatch_byte(0x764, 0x2f)\npatch_byte(0x765, 0xfe)\npatch_byte(0x766, 0xff)\npatch_byte(0x767, 0x14)\npatch_byte(0x4000080, 0x47)\npatch_byte(0x4000081, 0x0)\npatch_byte(0x4000082, 0x0)\npatch_byte(0x4000083, 0x35)\npatch_byte(0x4000084, 0x27)\npatch_byte(0x4000085, 0x1)\npatch_byte(0x4000086, 0x0)\npatch_byte(0x4000087, 0x34)\npatch_byte(0x4000088, 0xe0)\npatch_byte(0x4000089, 0x3)\npatch_byte(0x400008a, 0x14)\npatch_byte(0x400008b, 0x2a)\npatch_byte(0x400008c, 0xf3)\npatch_byte(0x400008d, 0x3)\npatch_byte(0x400008e, 0x7)\npatch_byte(0x400008f, 0x2a)\npatch_byte(0x4000090, 0x7f)\npatch_byte(0x4000091, 0x2)\npatch_byte(0x4000092, 0xa)\npatch_byte(0x4000093, 0x6b)\npatch_byte(0x4000094, 0x7f)\npatch_byte(0x4000095, 0x2)\npatch_byte(0x4000096, 0xb)\npatch_byte(0x4000097, 0x6b)\npatch_byte(0x4000098, 0x7f)\npatch_byte(0x4000099, 0x2)\npatch_byte(0x400009a, 0xc)\npatch_byte(0x400009b, 0x6b)\npatch_byte(0x400009c, 0x7f)\npatch_byte(0x400009d, 0x2)\npatch_byte(0x400009e, 0x10)\npatch_byte(0x400009f, 0x6b)\npatch_byte(0x40000a0, 0x7f)\npatch_byte(0x40000a1, 0x2)\npatch_byte(0x40000a2, 0x12)\npatch_byte(0x40000a3, 0x6b)\npatch_byte(0x40000a4, 0xdc)\npatch_byte(0x40000a5, 0x1)\npatch_byte(0x40000a6, 0x0)\npatch_byte(0x40000a7, 0x17)\npatch_byte(0x40000a8, 0xe0)\npatch_byte(0x40000a9, 0x3)\npatch_byte(0x40000aa, 0x14)\npatch_byte(0x40000ab, 0x2a)\npatch_byte(0x40000ac, 0xf3)\npatch_byte(0x40000ad, 0x3)\npatch_byte(0x40000ae, 0x7)\npatch_byte(0x40000af, 0x2a)\npatch_byte(0x40000b0, 0x7f)\npatch_byte(0x40000b1, 0x2)\npatch_byte(0x40000b2, 0xa)\npatch_byte(0x40000b3, 0x6b)\npatch_byte(0x40000b4, 0x7f)\npatch_byte(0x40000b5, 0x2)\npatch_byte(0x40000b6, 0xb)\npatch_byte(0x40000b7, 0x6b)\npatch_byte(0x40000b8, 0x7f)\npatch_byte(0x40000b9, 0x2)\npatch_byte(0x40000ba, 0xc)\npatch_byte(0x40000bb, 0x6b)\npatch_byte(0x40000bc, 0x7f)\npatch_byte(0x40000bd, 0x2)\npatch_byte(0x40000be, 0xd)\npatch_byte(0x40000bf, 0x6b)\npatch_byte(0x40000c0, 0xe7)\npatch_byte(0x40000c1, 0x3)\npatch_byte(0x40000c2, 0x13)\npatch_byte(0x40000c3, 0x2a)\npatch_byte(0x40000c4, 0xf4)\npatch_byte(0x40000c5, 0x3)\npatch_byte(0x40000c6, 0x0)\npatch_byte(0x40000c7, 0x2a)\npatch_byte(0x40000c8, 0xb4)\npatch_byte(0x40000c9, 0x1)\npatch_byte(0x40000ca, 0x0)\npatch_byte(0x40000cb, 0x17)\npatch_byte(0x7d8, 0xe7)\npatch_byte(0x7d9, 0x17)\npatch_byte(0x7da, 0x9f)\npatch_byte(0x7db, 0x1a)\npatch_byte(0x7e0, 0x28)\npatch_byte(0x7e1, 0xfe)\npatch_byte(0x7e2, 0xff)\npatch_byte(0x7e3, 0x14)\npatch_byte(0x40000cc, 0xe0)\npatch_byte(0x40000cd, 0x3)\npatch_byte(0x40000ce, 0x14)\npatch_byte(0x40000cf, 0x2a)\npatch_byte(0x40000d0, 0xf3)\npatch_byte(0x40000d1, 0x3)\npatch_byte(0x40000d2, 0x7)\npatch_byte(0x40000d3, 0x2a)\npatch_byte(0x40000d4, 0x7f)\npatch_byte(0x40000d5, 0x2)\npatch_byte(0x40000d6, 0xa)\npatch_byte(0x40000d7, 0x6b)\npatch_byte(0x40000d8, 0x7f)\npatch_byte(0x40000d9, 0x2)\npatch_byte(0x40000da, 0xb)\npatch_byte(0x40000db, 0x6b)\npatch_byte(0x40000dc, 0x7f)\npatch_byte(0x40000dd, 0x2)\npatch_byte(0x40000de, 
0xc)\npatch_byte(0x40000df, 0x6b)\npatch_byte(0x40000e0, 0x7f)\npatch_byte(0x40000e1, 0x2)\npatch_byte(0x40000e2, 0x10)\npatch_byte(0x40000e3, 0x6b)\npatch_byte(0x40000e4, 0x7f)\npatch_byte(0x40000e5, 0x2)\npatch_byte(0x40000e6, 0x12)\npatch_byte(0x40000e7, 0x6b)\npatch_byte(0x40000e8, 0x7f)\npatch_byte(0x40000e9, 0x2)\npatch_byte(0x40000ea, 0x2)\npatch_byte(0x40000eb, 0x6b)\npatch_byte(0x40000ec, 0x87)\npatch_byte(0x40000ed, 0x81)\npatch_byte(0x40000ee, 0xba)\npatch_byte(0x40000ef, 0x52)\npatch_byte(0x40000f0, 0x87)\npatch_byte(0x40000f1, 0xc)\npatch_byte(0x40000f2, 0x8f)\npatch_byte(0x40000f3, 0x72)\npatch_byte(0x40000f4, 0xf4)\npatch_byte(0x40000f5, 0x3)\npatch_byte(0x40000f6, 0x1)\npatch_byte(0x40000f7, 0x2a)\npatch_byte(0x40000f8, 0x7f)\npatch_byte(0x40000f9, 0x2)\npatch_byte(0x40000fa, 0x4)\npatch_byte(0x40000fb, 0x6b)\npatch_byte(0x40000fc, 0x87)\npatch_byte(0x40000fd, 0x81)\npatch_byte(0x40000fe, 0xba)\npatch_byte(0x40000ff, 0x52)\npatch_byte(0x4000100, 0x87)\npatch_byte(0x4000101, 0xc)\npatch_byte(0x4000102, 0x8f)\npatch_byte(0x4000103, 0x72)\npatch_byte(0x4000104, 0xf4)\npatch_byte(0x4000105, 0x3)\npatch_byte(0x4000106, 0x5)\npatch_byte(0x4000107, 0x2a)\npatch_byte(0x4000108, 0x7f)\npatch_byte(0x4000109, 0x2)\npatch_byte(0x400010a, 0x9)\npatch_byte(0x400010b, 0x6b)\npatch_byte(0x400010c, 0xe7)\npatch_byte(0x400010d, 0x3)\npatch_byte(0x400010e, 0x13)\npatch_byte(0x400010f, 0x2a)\npatch_byte(0x4000110, 0xf4)\npatch_byte(0x4000111, 0x3)\npatch_byte(0x4000112, 0x0)\npatch_byte(0x4000113, 0x2a)\npatch_byte(0x4000114, 0xce)\npatch_byte(0x4000115, 0x1)\npatch_byte(0x4000116, 0x0)\npatch_byte(0x4000117, 0x17)\npatch_byte(0x824, 0x2a)\npatch_byte(0x825, 0xfe)\npatch_byte(0x826, 0xff)\npatch_byte(0x827, 0x14)\npatch_byte(0x4000118, 0xe0)\npatch_byte(0x4000119, 0x3)\npatch_byte(0x400011a, 0x14)\npatch_byte(0x400011b, 0x2a)\npatch_byte(0x400011c, 0xf3)\npatch_byte(0x400011d, 0x3)\npatch_byte(0x400011e, 0x7)\npatch_byte(0x400011f, 0x2a)\npatch_byte(0x4000120, 0x7f)\npatch_byte(0x4000121, 0x2)\npatch_byte(0x4000122, 0xa)\npatch_byte(0x4000123, 0x6b)\npatch_byte(0x4000124, 0x7f)\npatch_byte(0x4000125, 0x2)\npatch_byte(0x4000126, 0xb)\npatch_byte(0x4000127, 0x6b)\npatch_byte(0x4000128, 0x7f)\npatch_byte(0x4000129, 0x2)\npatch_byte(0x400012a, 0xc)\npatch_byte(0x400012b, 0x6b)\npatch_byte(0x400012c, 0x7f)\npatch_byte(0x400012d, 0x2)\npatch_byte(0x400012e, 0x10)\npatch_byte(0x400012f, 0x6b)\npatch_byte(0x4000130, 0x7f)\npatch_byte(0x4000131, 0x2)\npatch_byte(0x4000132, 0x12)\npatch_byte(0x4000133, 0x6b)\npatch_byte(0x4000134, 0x7f)\npatch_byte(0x4000135, 0x2)\npatch_byte(0x4000136, 0x2)\npatch_byte(0x4000137, 0x6b)\npatch_byte(0x4000138, 0x87)\npatch_byte(0x4000139, 0x81)\npatch_byte(0x400013a, 0xba)\npatch_byte(0x400013b, 0x52)\npatch_byte(0x400013c, 0x87)\npatch_byte(0x400013d, 0xc)\npatch_byte(0x400013e, 0x8f)\npatch_byte(0x400013f, 0x72)\npatch_byte(0x4000140, 0xf4)\npatch_byte(0x4000141, 0x3)\npatch_byte(0x4000142, 0x1)\npatch_byte(0x4000143, 0x2a)\npatch_byte(0x4000144, 0x7f)\npatch_byte(0x4000145, 0x2)\npatch_byte(0x4000146, 0x4)\npatch_byte(0x4000147, 0x6b)\npatch_byte(0x4000148, 0x87)\npatch_byte(0x4000149, 0x81)\npatch_byte(0x400014a, 0xba)\npatch_byte(0x400014b, 0x52)\npatch_byte(0x400014c, 0x87)\npatch_byte(0x400014d, 0xc)\npatch_byte(0x400014e, 0x8f)\npatch_byte(0x400014f, 0x72)\npatch_byte(0x4000150, 0xf4)\npatch_byte(0x4000151, 0x3)\npatch_byte(0x4000152, 0x5)\npatch_byte(0x4000153, 0x2a)\npatch_byte(0x4000154, 0x7f)\npatch_byte(0x4000155, 0x2)\npatch_byte(0x4000156, 
0x9)\npatch_byte(0x4000157, 0x6b)\npatch_byte(0x4000158, 0xe7)\npatch_byte(0x4000159, 0x3)\npatch_byte(0x400015a, 0x13)\npatch_byte(0x400015b, 0x2a)\npatch_byte(0x400015c, 0xf4)\npatch_byte(0x400015d, 0x3)\npatch_byte(0x400015e, 0x0)\npatch_byte(0x400015f, 0x2a)\npatch_byte(0x4000160, 0xbb)\npatch_byte(0x4000161, 0x1)\npatch_byte(0x4000162, 0x0)\npatch_byte(0x4000163, 0x17)\npatch_byte(0x808, 0x44)\npatch_byte(0x809, 0xfe)\npatch_byte(0x80a, 0xff)\npatch_byte(0x80b, 0x14)\npatch_byte(0x4000164, 0x47)\npatch_byte(0x4000165, 0x0)\npatch_byte(0x4000166, 0x0)\npatch_byte(0x4000167, 0x35)\npatch_byte(0x4000168, 0x87)\npatch_byte(0x4000169, 0x1)\npatch_byte(0x400016a, 0x0)\npatch_byte(0x400016b, 0x34)\npatch_byte(0x400016c, 0xe0)\npatch_byte(0x400016d, 0x3)\npatch_byte(0x400016e, 0x14)\npatch_byte(0x400016f, 0x2a)\npatch_byte(0x4000170, 0xf3)\npatch_byte(0x4000171, 0x3)\npatch_byte(0x4000172, 0x7)\npatch_byte(0x4000173, 0x2a)\npatch_byte(0x4000174, 0x7f)\npatch_byte(0x4000175, 0x2)\npatch_byte(0x4000176, 0xa)\npatch_byte(0x4000177, 0x6b)\npatch_byte(0x4000178, 0x7f)\npatch_byte(0x4000179, 0x2)\npatch_byte(0x400017a, 0xb)\npatch_byte(0x400017b, 0x6b)\npatch_byte(0x400017c, 0x7f)\npatch_byte(0x400017d, 0x2)\npatch_byte(0x400017e, 0xc)\npatch_byte(0x400017f, 0x6b)\npatch_byte(0x4000180, 0x7f)\npatch_byte(0x4000181, 0x2)\npatch_byte(0x4000182, 0x10)\npatch_byte(0x4000183, 0x6b)\npatch_byte(0x4000184, 0x7f)\npatch_byte(0x4000185, 0x2)\npatch_byte(0x4000186, 0x12)\npatch_byte(0x4000187, 0x6b)\npatch_byte(0x4000188, 0x7f)\npatch_byte(0x4000189, 0x2)\npatch_byte(0x400018a, 0xf)\npatch_byte(0x400018b, 0x6b)\npatch_byte(0x400018c, 0xe7)\npatch_byte(0x400018d, 0x3)\npatch_byte(0x400018e, 0x13)\npatch_byte(0x400018f, 0x2a)\npatch_byte(0x4000190, 0xf4)\npatch_byte(0x4000191, 0x3)\npatch_byte(0x4000192, 0x0)\npatch_byte(0x4000193, 0x2a)\npatch_byte(0x4000194, 0xb3)\npatch_byte(0x4000195, 0x1)\npatch_byte(0x4000196, 0x0)\npatch_byte(0x4000197, 0x17)\npatch_byte(0x4000198, 0xe0)\npatch_byte(0x4000199, 0x3)\npatch_byte(0x400019a, 0x14)\npatch_byte(0x400019b, 0x2a)\npatch_byte(0x400019c, 0xf3)\npatch_byte(0x400019d, 0x3)\npatch_byte(0x400019e, 0x7)\npatch_byte(0x400019f, 0x2a)\npatch_byte(0x40001a0, 0x7f)\npatch_byte(0x40001a1, 0x2)\npatch_byte(0x40001a2, 0xa)\npatch_byte(0x40001a3, 0x6b)\npatch_byte(0x40001a4, 0x7f)\npatch_byte(0x40001a5, 0x2)\npatch_byte(0x40001a6, 0xe)\npatch_byte(0x40001a7, 0x6b)\npatch_byte(0x40001a8, 0xe7)\npatch_byte(0x40001a9, 0x3)\npatch_byte(0x40001aa, 0x13)\npatch_byte(0x40001ab, 0x2a)\npatch_byte(0x40001ac, 0xf4)\npatch_byte(0x40001ad, 0x3)\npatch_byte(0x40001ae, 0x0)\npatch_byte(0x40001af, 0x2a)\npatch_byte(0x40001b0, 0x83)\npatch_byte(0x40001b1, 0x1)\npatch_byte(0x40001b2, 0x0)\npatch_byte(0x40001b3, 0x17)\npatch_byte(0x7a0, 0xe7)\npatch_byte(0x7a1, 0x17)\npatch_byte(0x7a2, 0x9f)\npatch_byte(0x7a3, 0x1a)\npatch_byte(0x7a8, 0x6f)\npatch_byte(0x7a9, 0xfe)\npatch_byte(0x7aa, 0xff)\npatch_byte(0x7ab, 0x14)\npatch_byte(0x40001b4, 0xe0)\npatch_byte(0x40001b5, 0x3)\npatch_byte(0x40001b6, 0x14)\npatch_byte(0x40001b7, 0x2a)\npatch_byte(0x40001b8, 0xf3)\npatch_byte(0x40001b9, 0x3)\npatch_byte(0x40001ba, 0x7)\npatch_byte(0x40001bb, 0x2a)\npatch_byte(0x40001bc, 0x7f)\npatch_byte(0x40001bd, 0x2)\npatch_byte(0x40001be, 0xa)\npatch_byte(0x40001bf, 0x6b)\npatch_byte(0x40001c0, 0x7f)\npatch_byte(0x40001c1, 0x2)\npatch_byte(0x40001c2, 0xb)\npatch_byte(0x40001c3, 0x6b)\npatch_byte(0x40001c4, 0x7f)\npatch_byte(0x40001c5, 0x2)\npatch_byte(0x40001c6, 0xc)\npatch_byte(0x40001c7, 
0x6b)\npatch_byte(0x40001c8, 0x7f)\npatch_byte(0x40001c9, 0x2)\npatch_byte(0x40001ca, 0x10)\npatch_byte(0x40001cb, 0x6b)\npatch_byte(0x40001cc, 0x7f)\npatch_byte(0x40001cd, 0x2)\npatch_byte(0x40001ce, 0x12)\npatch_byte(0x40001cf, 0x6b)\npatch_byte(0x40001d0, 0x7f)\npatch_byte(0x40001d1, 0x2)\npatch_byte(0x40001d2, 0x2)\npatch_byte(0x40001d3, 0x6b)\npatch_byte(0x40001d4, 0x87)\npatch_byte(0x40001d5, 0x81)\npatch_byte(0x40001d6, 0xba)\npatch_byte(0x40001d7, 0x52)\npatch_byte(0x40001d8, 0x87)\npatch_byte(0x40001d9, 0xc)\npatch_byte(0x40001da, 0x8f)\npatch_byte(0x40001db, 0x72)\npatch_byte(0x40001dc, 0xf4)\npatch_byte(0x40001dd, 0x3)\npatch_byte(0x40001de, 0x1)\npatch_byte(0x40001df, 0x2a)\npatch_byte(0x40001e0, 0x7f)\npatch_byte(0x40001e1, 0x2)\npatch_byte(0x40001e2, 0x4)\npatch_byte(0x40001e3, 0x6b)\npatch_byte(0x40001e4, 0x87)\npatch_byte(0x40001e5, 0x81)\npatch_byte(0x40001e6, 0xba)\npatch_byte(0x40001e7, 0x52)\npatch_byte(0x40001e8, 0x87)\npatch_byte(0x40001e9, 0xc)\npatch_byte(0x40001ea, 0x8f)\npatch_byte(0x40001eb, 0x72)\npatch_byte(0x40001ec, 0xf4)\npatch_byte(0x40001ed, 0x3)\npatch_byte(0x40001ee, 0x5)\npatch_byte(0x40001ef, 0x2a)\npatch_byte(0x40001f0, 0x7f)\npatch_byte(0x40001f1, 0x2)\npatch_byte(0x40001f2, 0x9)\npatch_byte(0x40001f3, 0x6b)\npatch_byte(0x40001f4, 0xe7)\npatch_byte(0x40001f5, 0x3)\npatch_byte(0x40001f6, 0x13)\npatch_byte(0x40001f7, 0x2a)\npatch_byte(0x40001f8, 0xf4)\npatch_byte(0x40001f9, 0x3)\npatch_byte(0x40001fa, 0x0)\npatch_byte(0x40001fb, 0x2a)\npatch_byte(0x40001fc, 0x94)\npatch_byte(0x40001fd, 0x1)\npatch_byte(0x40001fe, 0x0)\npatch_byte(0x40001ff, 0x17)\npatch_byte(0x86c, 0x52)\npatch_byte(0x86d, 0xfe)\npatch_byte(0x86e, 0xff)\npatch_byte(0x86f, 0x14)\npatch_byte(0x4000200, 0xe0)\npatch_byte(0x4000201, 0x3)\npatch_byte(0x4000202, 0x14)\npatch_byte(0x4000203, 0x2a)\npatch_byte(0x4000204, 0xf3)\npatch_byte(0x4000205, 0x3)\npatch_byte(0x4000206, 0x7)\npatch_byte(0x4000207, 0x2a)\npatch_byte(0x4000208, 0x7f)\npatch_byte(0x4000209, 0x2)\npatch_byte(0x400020a, 0xa)\npatch_byte(0x400020b, 0x6b)\npatch_byte(0x400020c, 0x7f)\npatch_byte(0x400020d, 0x2)\npatch_byte(0x400020e, 0xb)\npatch_byte(0x400020f, 0x6b)\npatch_byte(0x4000210, 0x7f)\npatch_byte(0x4000211, 0x2)\npatch_byte(0x4000212, 0xc)\npatch_byte(0x4000213, 0x6b)\npatch_byte(0x4000214, 0x7f)\npatch_byte(0x4000215, 0x2)\npatch_byte(0x4000216, 0x10)\npatch_byte(0x4000217, 0x6b)\npatch_byte(0x4000218, 0x7f)\npatch_byte(0x4000219, 0x2)\npatch_byte(0x400021a, 0x12)\npatch_byte(0x400021b, 0x6b)\npatch_byte(0x400021c, 0x7f)\npatch_byte(0x400021d, 0x2)\npatch_byte(0x400021e, 0x2)\npatch_byte(0x400021f, 0x6b)\npatch_byte(0x4000220, 0x87)\npatch_byte(0x4000221, 0x81)\npatch_byte(0x4000222, 0xba)\npatch_byte(0x4000223, 0x52)\npatch_byte(0x4000224, 0x87)\npatch_byte(0x4000225, 0xc)\npatch_byte(0x4000226, 0x8f)\npatch_byte(0x4000227, 0x72)\npatch_byte(0x4000228, 0xf4)\npatch_byte(0x4000229, 0x3)\npatch_byte(0x400022a, 0x1)\npatch_byte(0x400022b, 0x2a)\npatch_byte(0x400022c, 0x7f)\npatch_byte(0x400022d, 0x2)\npatch_byte(0x400022e, 0x4)\npatch_byte(0x400022f, 0x6b)\npatch_byte(0x4000230, 0x87)\npatch_byte(0x4000231, 0x81)\npatch_byte(0x4000232, 0xba)\npatch_byte(0x4000233, 0x52)\npatch_byte(0x4000234, 0x87)\npatch_byte(0x4000235, 0xc)\npatch_byte(0x4000236, 0x8f)\npatch_byte(0x4000237, 0x72)\npatch_byte(0x4000238, 0xf4)\npatch_byte(0x4000239, 0x3)\npatch_byte(0x400023a, 0x5)\npatch_byte(0x400023b, 0x2a)\npatch_byte(0x400023c, 0x7f)\npatch_byte(0x400023d, 0x2)\npatch_byte(0x400023e, 0x9)\npatch_byte(0x400023f, 
0x6b)\npatch_byte(0x4000240, 0xe7)\npatch_byte(0x4000241, 0x3)\npatch_byte(0x4000242, 0x13)\npatch_byte(0x4000243, 0x2a)\npatch_byte(0x4000244, 0xf4)\npatch_byte(0x4000245, 0x3)\npatch_byte(0x4000246, 0x0)\npatch_byte(0x4000247, 0x2a)\npatch_byte(0x4000248, 0x81)\npatch_byte(0x4000249, 0x1)\npatch_byte(0x400024a, 0x0)\npatch_byte(0x400024b, 0x17)\npatch_byte(0x838, 0x72)\npatch_byte(0x839, 0xfe)\npatch_byte(0x83a, 0xff)\npatch_byte(0x83b, 0x14)\n",
"from idc import *\nbase = 67108864\nseg_size = 4194304\naddress = base\nadd_segm_ex(base, base + seg_size, 1, 2, 1, 2, ADDSEG_NOSREG)\nset_segm_name(base, 'patch')\nset_segm_class(base, 'CODE')\nset_segm_type(base, 2)\npatch_byte(67108864, 224)\npatch_byte(67108865, 3)\npatch_byte(67108866, 20)\npatch_byte(67108867, 42)\npatch_byte(67108868, 243)\npatch_byte(67108869, 3)\npatch_byte(67108870, 7)\npatch_byte(67108871, 42)\npatch_byte(67108872, 127)\npatch_byte(67108873, 2)\npatch_byte(67108874, 10)\npatch_byte(67108875, 107)\npatch_byte(67108876, 127)\npatch_byte(67108877, 2)\npatch_byte(67108878, 11)\npatch_byte(67108879, 107)\npatch_byte(67108880, 127)\npatch_byte(67108881, 2)\npatch_byte(67108882, 8)\npatch_byte(67108883, 107)\npatch_byte(67108884, 231)\npatch_byte(67108885, 3)\npatch_byte(67108886, 19)\npatch_byte(67108887, 42)\npatch_byte(67108888, 244)\npatch_byte(67108889, 3)\npatch_byte(67108890, 0)\npatch_byte(67108891, 42)\npatch_byte(67108892, 237)\npatch_byte(67108893, 1)\npatch_byte(67108894, 0)\npatch_byte(67108895, 23)\npatch_byte(1876, 43)\npatch_byte(1877, 254)\npatch_byte(1878, 255)\npatch_byte(1879, 20)\npatch_byte(67108896, 71)\npatch_byte(67108897, 0)\npatch_byte(67108898, 0)\npatch_byte(67108899, 53)\npatch_byte(67108900, 167)\npatch_byte(67108901, 1)\npatch_byte(67108902, 0)\npatch_byte(67108903, 52)\npatch_byte(67108904, 224)\npatch_byte(67108905, 3)\npatch_byte(67108906, 20)\npatch_byte(67108907, 42)\npatch_byte(67108908, 243)\npatch_byte(67108909, 3)\npatch_byte(67108910, 7)\npatch_byte(67108911, 42)\npatch_byte(67108912, 127)\npatch_byte(67108913, 2)\npatch_byte(67108914, 10)\npatch_byte(67108915, 107)\npatch_byte(67108916, 127)\npatch_byte(67108917, 2)\npatch_byte(67108918, 11)\npatch_byte(67108919, 107)\npatch_byte(67108920, 127)\npatch_byte(67108921, 2)\npatch_byte(67108922, 12)\npatch_byte(67108923, 107)\npatch_byte(67108924, 127)\npatch_byte(67108925, 2)\npatch_byte(67108926, 16)\npatch_byte(67108927, 107)\npatch_byte(67108928, 127)\npatch_byte(67108929, 2)\npatch_byte(67108930, 18)\npatch_byte(67108931, 107)\npatch_byte(67108932, 127)\npatch_byte(67108933, 2)\npatch_byte(67108934, 2)\npatch_byte(67108935, 107)\npatch_byte(67108936, 135)\npatch_byte(67108937, 129)\npatch_byte(67108938, 186)\npatch_byte(67108939, 82)\npatch_byte(67108940, 135)\npatch_byte(67108941, 12)\npatch_byte(67108942, 143)\npatch_byte(67108943, 114)\npatch_byte(67108944, 244)\npatch_byte(67108945, 3)\npatch_byte(67108946, 1)\npatch_byte(67108947, 42)\npatch_byte(67108948, 245)\npatch_byte(67108949, 1)\npatch_byte(67108950, 0)\npatch_byte(67108951, 23)\npatch_byte(67108952, 224)\npatch_byte(67108953, 3)\npatch_byte(67108954, 20)\npatch_byte(67108955, 42)\npatch_byte(67108956, 243)\npatch_byte(67108957, 3)\npatch_byte(67108958, 7)\npatch_byte(67108959, 42)\npatch_byte(67108960, 127)\npatch_byte(67108961, 2)\npatch_byte(67108962, 10)\npatch_byte(67108963, 107)\npatch_byte(67108964, 127)\npatch_byte(67108965, 2)\npatch_byte(67108966, 11)\npatch_byte(67108967, 107)\npatch_byte(67108968, 127)\npatch_byte(67108969, 2)\npatch_byte(67108970, 12)\npatch_byte(67108971, 107)\npatch_byte(67108972, 127)\npatch_byte(67108973, 2)\npatch_byte(67108974, 16)\npatch_byte(67108975, 107)\npatch_byte(67108976, 127)\npatch_byte(67108977, 2)\npatch_byte(67108978, 17)\npatch_byte(67108979, 107)\npatch_byte(67108980, 231)\npatch_byte(67108981, 3)\npatch_byte(67108982, 19)\npatch_byte(67108983, 42)\npatch_byte(67108984, 244)\npatch_byte(67108985, 3)\npatch_byte(67108986, 0)\npatch_byte(67108987, 
42)\npatch_byte(67108988, 224)\npatch_byte(67108989, 1)\npatch_byte(67108990, 0)\npatch_byte(67108991, 23)\npatch_byte(1888, 231)\npatch_byte(1889, 23)\npatch_byte(1890, 159)\npatch_byte(1891, 26)\npatch_byte(1892, 47)\npatch_byte(1893, 254)\npatch_byte(1894, 255)\npatch_byte(1895, 20)\npatch_byte(67108992, 71)\npatch_byte(67108993, 0)\npatch_byte(67108994, 0)\npatch_byte(67108995, 53)\npatch_byte(67108996, 39)\npatch_byte(67108997, 1)\npatch_byte(67108998, 0)\npatch_byte(67108999, 52)\npatch_byte(67109000, 224)\npatch_byte(67109001, 3)\npatch_byte(67109002, 20)\npatch_byte(67109003, 42)\npatch_byte(67109004, 243)\npatch_byte(67109005, 3)\npatch_byte(67109006, 7)\npatch_byte(67109007, 42)\npatch_byte(67109008, 127)\npatch_byte(67109009, 2)\npatch_byte(67109010, 10)\npatch_byte(67109011, 107)\npatch_byte(67109012, 127)\npatch_byte(67109013, 2)\npatch_byte(67109014, 11)\npatch_byte(67109015, 107)\npatch_byte(67109016, 127)\npatch_byte(67109017, 2)\npatch_byte(67109018, 12)\npatch_byte(67109019, 107)\npatch_byte(67109020, 127)\npatch_byte(67109021, 2)\npatch_byte(67109022, 16)\npatch_byte(67109023, 107)\npatch_byte(67109024, 127)\npatch_byte(67109025, 2)\npatch_byte(67109026, 18)\npatch_byte(67109027, 107)\npatch_byte(67109028, 220)\npatch_byte(67109029, 1)\npatch_byte(67109030, 0)\npatch_byte(67109031, 23)\npatch_byte(67109032, 224)\npatch_byte(67109033, 3)\npatch_byte(67109034, 20)\npatch_byte(67109035, 42)\npatch_byte(67109036, 243)\npatch_byte(67109037, 3)\npatch_byte(67109038, 7)\npatch_byte(67109039, 42)\npatch_byte(67109040, 127)\npatch_byte(67109041, 2)\npatch_byte(67109042, 10)\npatch_byte(67109043, 107)\npatch_byte(67109044, 127)\npatch_byte(67109045, 2)\npatch_byte(67109046, 11)\npatch_byte(67109047, 107)\npatch_byte(67109048, 127)\npatch_byte(67109049, 2)\npatch_byte(67109050, 12)\npatch_byte(67109051, 107)\npatch_byte(67109052, 127)\npatch_byte(67109053, 2)\npatch_byte(67109054, 13)\npatch_byte(67109055, 107)\npatch_byte(67109056, 231)\npatch_byte(67109057, 3)\npatch_byte(67109058, 19)\npatch_byte(67109059, 42)\npatch_byte(67109060, 244)\npatch_byte(67109061, 3)\npatch_byte(67109062, 0)\npatch_byte(67109063, 42)\npatch_byte(67109064, 180)\npatch_byte(67109065, 1)\npatch_byte(67109066, 0)\npatch_byte(67109067, 23)\npatch_byte(2008, 231)\npatch_byte(2009, 23)\npatch_byte(2010, 159)\npatch_byte(2011, 26)\npatch_byte(2016, 40)\npatch_byte(2017, 254)\npatch_byte(2018, 255)\npatch_byte(2019, 20)\npatch_byte(67109068, 224)\npatch_byte(67109069, 3)\npatch_byte(67109070, 20)\npatch_byte(67109071, 42)\npatch_byte(67109072, 243)\npatch_byte(67109073, 3)\npatch_byte(67109074, 7)\npatch_byte(67109075, 42)\npatch_byte(67109076, 127)\npatch_byte(67109077, 2)\npatch_byte(67109078, 10)\npatch_byte(67109079, 107)\npatch_byte(67109080, 127)\npatch_byte(67109081, 2)\npatch_byte(67109082, 11)\npatch_byte(67109083, 107)\npatch_byte(67109084, 127)\npatch_byte(67109085, 2)\npatch_byte(67109086, 12)\npatch_byte(67109087, 107)\npatch_byte(67109088, 127)\npatch_byte(67109089, 2)\npatch_byte(67109090, 16)\npatch_byte(67109091, 107)\npatch_byte(67109092, 127)\npatch_byte(67109093, 2)\npatch_byte(67109094, 18)\npatch_byte(67109095, 107)\npatch_byte(67109096, 127)\npatch_byte(67109097, 2)\npatch_byte(67109098, 2)\npatch_byte(67109099, 107)\npatch_byte(67109100, 135)\npatch_byte(67109101, 129)\npatch_byte(67109102, 186)\npatch_byte(67109103, 82)\npatch_byte(67109104, 135)\npatch_byte(67109105, 12)\npatch_byte(67109106, 143)\npatch_byte(67109107, 114)\npatch_byte(67109108, 244)\npatch_byte(67109109, 
3)\npatch_byte(67109110, 1)\npatch_byte(67109111, 42)\npatch_byte(67109112, 127)\npatch_byte(67109113, 2)\npatch_byte(67109114, 4)\npatch_byte(67109115, 107)\npatch_byte(67109116, 135)\npatch_byte(67109117, 129)\npatch_byte(67109118, 186)\npatch_byte(67109119, 82)\npatch_byte(67109120, 135)\npatch_byte(67109121, 12)\npatch_byte(67109122, 143)\npatch_byte(67109123, 114)\npatch_byte(67109124, 244)\npatch_byte(67109125, 3)\npatch_byte(67109126, 5)\npatch_byte(67109127, 42)\npatch_byte(67109128, 127)\npatch_byte(67109129, 2)\npatch_byte(67109130, 9)\npatch_byte(67109131, 107)\npatch_byte(67109132, 231)\npatch_byte(67109133, 3)\npatch_byte(67109134, 19)\npatch_byte(67109135, 42)\npatch_byte(67109136, 244)\npatch_byte(67109137, 3)\npatch_byte(67109138, 0)\npatch_byte(67109139, 42)\npatch_byte(67109140, 206)\npatch_byte(67109141, 1)\npatch_byte(67109142, 0)\npatch_byte(67109143, 23)\npatch_byte(2084, 42)\npatch_byte(2085, 254)\npatch_byte(2086, 255)\npatch_byte(2087, 20)\npatch_byte(67109144, 224)\npatch_byte(67109145, 3)\npatch_byte(67109146, 20)\npatch_byte(67109147, 42)\npatch_byte(67109148, 243)\npatch_byte(67109149, 3)\npatch_byte(67109150, 7)\npatch_byte(67109151, 42)\npatch_byte(67109152, 127)\npatch_byte(67109153, 2)\npatch_byte(67109154, 10)\npatch_byte(67109155, 107)\npatch_byte(67109156, 127)\npatch_byte(67109157, 2)\npatch_byte(67109158, 11)\npatch_byte(67109159, 107)\npatch_byte(67109160, 127)\npatch_byte(67109161, 2)\npatch_byte(67109162, 12)\npatch_byte(67109163, 107)\npatch_byte(67109164, 127)\npatch_byte(67109165, 2)\npatch_byte(67109166, 16)\npatch_byte(67109167, 107)\npatch_byte(67109168, 127)\npatch_byte(67109169, 2)\npatch_byte(67109170, 18)\npatch_byte(67109171, 107)\npatch_byte(67109172, 127)\npatch_byte(67109173, 2)\npatch_byte(67109174, 2)\npatch_byte(67109175, 107)\npatch_byte(67109176, 135)\npatch_byte(67109177, 129)\npatch_byte(67109178, 186)\npatch_byte(67109179, 82)\npatch_byte(67109180, 135)\npatch_byte(67109181, 12)\npatch_byte(67109182, 143)\npatch_byte(67109183, 114)\npatch_byte(67109184, 244)\npatch_byte(67109185, 3)\npatch_byte(67109186, 1)\npatch_byte(67109187, 42)\npatch_byte(67109188, 127)\npatch_byte(67109189, 2)\npatch_byte(67109190, 4)\npatch_byte(67109191, 107)\npatch_byte(67109192, 135)\npatch_byte(67109193, 129)\npatch_byte(67109194, 186)\npatch_byte(67109195, 82)\npatch_byte(67109196, 135)\npatch_byte(67109197, 12)\npatch_byte(67109198, 143)\npatch_byte(67109199, 114)\npatch_byte(67109200, 244)\npatch_byte(67109201, 3)\npatch_byte(67109202, 5)\npatch_byte(67109203, 42)\npatch_byte(67109204, 127)\npatch_byte(67109205, 2)\npatch_byte(67109206, 9)\npatch_byte(67109207, 107)\npatch_byte(67109208, 231)\npatch_byte(67109209, 3)\npatch_byte(67109210, 19)\npatch_byte(67109211, 42)\npatch_byte(67109212, 244)\npatch_byte(67109213, 3)\npatch_byte(67109214, 0)\npatch_byte(67109215, 42)\npatch_byte(67109216, 187)\npatch_byte(67109217, 1)\npatch_byte(67109218, 0)\npatch_byte(67109219, 23)\npatch_byte(2056, 68)\npatch_byte(2057, 254)\npatch_byte(2058, 255)\npatch_byte(2059, 20)\npatch_byte(67109220, 71)\npatch_byte(67109221, 0)\npatch_byte(67109222, 0)\npatch_byte(67109223, 53)\npatch_byte(67109224, 135)\npatch_byte(67109225, 1)\npatch_byte(67109226, 0)\npatch_byte(67109227, 52)\npatch_byte(67109228, 224)\npatch_byte(67109229, 3)\npatch_byte(67109230, 20)\npatch_byte(67109231, 42)\npatch_byte(67109232, 243)\npatch_byte(67109233, 3)\npatch_byte(67109234, 7)\npatch_byte(67109235, 42)\npatch_byte(67109236, 127)\npatch_byte(67109237, 2)\npatch_byte(67109238, 
10)\npatch_byte(67109239, 107)\npatch_byte(67109240, 127)\npatch_byte(67109241, 2)\npatch_byte(67109242, 11)\npatch_byte(67109243, 107)\npatch_byte(67109244, 127)\npatch_byte(67109245, 2)\npatch_byte(67109246, 12)\npatch_byte(67109247, 107)\npatch_byte(67109248, 127)\npatch_byte(67109249, 2)\npatch_byte(67109250, 16)\npatch_byte(67109251, 107)\npatch_byte(67109252, 127)\npatch_byte(67109253, 2)\npatch_byte(67109254, 18)\npatch_byte(67109255, 107)\npatch_byte(67109256, 127)\npatch_byte(67109257, 2)\npatch_byte(67109258, 15)\npatch_byte(67109259, 107)\npatch_byte(67109260, 231)\npatch_byte(67109261, 3)\npatch_byte(67109262, 19)\npatch_byte(67109263, 42)\npatch_byte(67109264, 244)\npatch_byte(67109265, 3)\npatch_byte(67109266, 0)\npatch_byte(67109267, 42)\npatch_byte(67109268, 179)\npatch_byte(67109269, 1)\npatch_byte(67109270, 0)\npatch_byte(67109271, 23)\npatch_byte(67109272, 224)\npatch_byte(67109273, 3)\npatch_byte(67109274, 20)\npatch_byte(67109275, 42)\npatch_byte(67109276, 243)\npatch_byte(67109277, 3)\npatch_byte(67109278, 7)\npatch_byte(67109279, 42)\npatch_byte(67109280, 127)\npatch_byte(67109281, 2)\npatch_byte(67109282, 10)\npatch_byte(67109283, 107)\npatch_byte(67109284, 127)\npatch_byte(67109285, 2)\npatch_byte(67109286, 14)\npatch_byte(67109287, 107)\npatch_byte(67109288, 231)\npatch_byte(67109289, 3)\npatch_byte(67109290, 19)\npatch_byte(67109291, 42)\npatch_byte(67109292, 244)\npatch_byte(67109293, 3)\npatch_byte(67109294, 0)\npatch_byte(67109295, 42)\npatch_byte(67109296, 131)\npatch_byte(67109297, 1)\npatch_byte(67109298, 0)\npatch_byte(67109299, 23)\npatch_byte(1952, 231)\npatch_byte(1953, 23)\npatch_byte(1954, 159)\npatch_byte(1955, 26)\npatch_byte(1960, 111)\npatch_byte(1961, 254)\npatch_byte(1962, 255)\npatch_byte(1963, 20)\npatch_byte(67109300, 224)\npatch_byte(67109301, 3)\npatch_byte(67109302, 20)\npatch_byte(67109303, 42)\npatch_byte(67109304, 243)\npatch_byte(67109305, 3)\npatch_byte(67109306, 7)\npatch_byte(67109307, 42)\npatch_byte(67109308, 127)\npatch_byte(67109309, 2)\npatch_byte(67109310, 10)\npatch_byte(67109311, 107)\npatch_byte(67109312, 127)\npatch_byte(67109313, 2)\npatch_byte(67109314, 11)\npatch_byte(67109315, 107)\npatch_byte(67109316, 127)\npatch_byte(67109317, 2)\npatch_byte(67109318, 12)\npatch_byte(67109319, 107)\npatch_byte(67109320, 127)\npatch_byte(67109321, 2)\npatch_byte(67109322, 16)\npatch_byte(67109323, 107)\npatch_byte(67109324, 127)\npatch_byte(67109325, 2)\npatch_byte(67109326, 18)\npatch_byte(67109327, 107)\npatch_byte(67109328, 127)\npatch_byte(67109329, 2)\npatch_byte(67109330, 2)\npatch_byte(67109331, 107)\npatch_byte(67109332, 135)\npatch_byte(67109333, 129)\npatch_byte(67109334, 186)\npatch_byte(67109335, 82)\npatch_byte(67109336, 135)\npatch_byte(67109337, 12)\npatch_byte(67109338, 143)\npatch_byte(67109339, 114)\npatch_byte(67109340, 244)\npatch_byte(67109341, 3)\npatch_byte(67109342, 1)\npatch_byte(67109343, 42)\npatch_byte(67109344, 127)\npatch_byte(67109345, 2)\npatch_byte(67109346, 4)\npatch_byte(67109347, 107)\npatch_byte(67109348, 135)\npatch_byte(67109349, 129)\npatch_byte(67109350, 186)\npatch_byte(67109351, 82)\npatch_byte(67109352, 135)\npatch_byte(67109353, 12)\npatch_byte(67109354, 143)\npatch_byte(67109355, 114)\npatch_byte(67109356, 244)\npatch_byte(67109357, 3)\npatch_byte(67109358, 5)\npatch_byte(67109359, 42)\npatch_byte(67109360, 127)\npatch_byte(67109361, 2)\npatch_byte(67109362, 9)\npatch_byte(67109363, 107)\npatch_byte(67109364, 231)\npatch_byte(67109365, 3)\npatch_byte(67109366, 19)\npatch_byte(67109367, 
42)\npatch_byte(67109368, 244)\npatch_byte(67109369, 3)\npatch_byte(67109370, 0)\npatch_byte(67109371, 42)\npatch_byte(67109372, 148)\npatch_byte(67109373, 1)\npatch_byte(67109374, 0)\npatch_byte(67109375, 23)\npatch_byte(2156, 82)\npatch_byte(2157, 254)\npatch_byte(2158, 255)\npatch_byte(2159, 20)\npatch_byte(67109376, 224)\npatch_byte(67109377, 3)\npatch_byte(67109378, 20)\npatch_byte(67109379, 42)\npatch_byte(67109380, 243)\npatch_byte(67109381, 3)\npatch_byte(67109382, 7)\npatch_byte(67109383, 42)\npatch_byte(67109384, 127)\npatch_byte(67109385, 2)\npatch_byte(67109386, 10)\npatch_byte(67109387, 107)\npatch_byte(67109388, 127)\npatch_byte(67109389, 2)\npatch_byte(67109390, 11)\npatch_byte(67109391, 107)\npatch_byte(67109392, 127)\npatch_byte(67109393, 2)\npatch_byte(67109394, 12)\npatch_byte(67109395, 107)\npatch_byte(67109396, 127)\npatch_byte(67109397, 2)\npatch_byte(67109398, 16)\npatch_byte(67109399, 107)\npatch_byte(67109400, 127)\npatch_byte(67109401, 2)\npatch_byte(67109402, 18)\npatch_byte(67109403, 107)\npatch_byte(67109404, 127)\npatch_byte(67109405, 2)\npatch_byte(67109406, 2)\npatch_byte(67109407, 107)\npatch_byte(67109408, 135)\npatch_byte(67109409, 129)\npatch_byte(67109410, 186)\npatch_byte(67109411, 82)\npatch_byte(67109412, 135)\npatch_byte(67109413, 12)\npatch_byte(67109414, 143)\npatch_byte(67109415, 114)\npatch_byte(67109416, 244)\npatch_byte(67109417, 3)\npatch_byte(67109418, 1)\npatch_byte(67109419, 42)\npatch_byte(67109420, 127)\npatch_byte(67109421, 2)\npatch_byte(67109422, 4)\npatch_byte(67109423, 107)\npatch_byte(67109424, 135)\npatch_byte(67109425, 129)\npatch_byte(67109426, 186)\npatch_byte(67109427, 82)\npatch_byte(67109428, 135)\npatch_byte(67109429, 12)\npatch_byte(67109430, 143)\npatch_byte(67109431, 114)\npatch_byte(67109432, 244)\npatch_byte(67109433, 3)\npatch_byte(67109434, 5)\npatch_byte(67109435, 42)\npatch_byte(67109436, 127)\npatch_byte(67109437, 2)\npatch_byte(67109438, 9)\npatch_byte(67109439, 107)\npatch_byte(67109440, 231)\npatch_byte(67109441, 3)\npatch_byte(67109442, 19)\npatch_byte(67109443, 42)\npatch_byte(67109444, 244)\npatch_byte(67109445, 3)\npatch_byte(67109446, 0)\npatch_byte(67109447, 42)\npatch_byte(67109448, 129)\npatch_byte(67109449, 1)\npatch_byte(67109450, 0)\npatch_byte(67109451, 23)\npatch_byte(2104, 114)\npatch_byte(2105, 254)\npatch_byte(2106, 255)\npatch_byte(2107, 20)\n",
"<import token>\nbase = 67108864\nseg_size = 4194304\naddress = base\nadd_segm_ex(base, base + seg_size, 1, 2, 1, 2, ADDSEG_NOSREG)\nset_segm_name(base, 'patch')\nset_segm_class(base, 'CODE')\nset_segm_type(base, 2)\npatch_byte(67108864, 224)\npatch_byte(67108865, 3)\npatch_byte(67108866, 20)\npatch_byte(67108867, 42)\npatch_byte(67108868, 243)\npatch_byte(67108869, 3)\npatch_byte(67108870, 7)\npatch_byte(67108871, 42)\npatch_byte(67108872, 127)\npatch_byte(67108873, 2)\npatch_byte(67108874, 10)\npatch_byte(67108875, 107)\npatch_byte(67108876, 127)\npatch_byte(67108877, 2)\npatch_byte(67108878, 11)\npatch_byte(67108879, 107)\npatch_byte(67108880, 127)\npatch_byte(67108881, 2)\npatch_byte(67108882, 8)\npatch_byte(67108883, 107)\npatch_byte(67108884, 231)\npatch_byte(67108885, 3)\npatch_byte(67108886, 19)\npatch_byte(67108887, 42)\npatch_byte(67108888, 244)\npatch_byte(67108889, 3)\npatch_byte(67108890, 0)\npatch_byte(67108891, 42)\npatch_byte(67108892, 237)\npatch_byte(67108893, 1)\npatch_byte(67108894, 0)\npatch_byte(67108895, 23)\npatch_byte(1876, 43)\npatch_byte(1877, 254)\npatch_byte(1878, 255)\npatch_byte(1879, 20)\npatch_byte(67108896, 71)\npatch_byte(67108897, 0)\npatch_byte(67108898, 0)\npatch_byte(67108899, 53)\npatch_byte(67108900, 167)\npatch_byte(67108901, 1)\npatch_byte(67108902, 0)\npatch_byte(67108903, 52)\npatch_byte(67108904, 224)\npatch_byte(67108905, 3)\npatch_byte(67108906, 20)\npatch_byte(67108907, 42)\npatch_byte(67108908, 243)\npatch_byte(67108909, 3)\npatch_byte(67108910, 7)\npatch_byte(67108911, 42)\npatch_byte(67108912, 127)\npatch_byte(67108913, 2)\npatch_byte(67108914, 10)\npatch_byte(67108915, 107)\npatch_byte(67108916, 127)\npatch_byte(67108917, 2)\npatch_byte(67108918, 11)\npatch_byte(67108919, 107)\npatch_byte(67108920, 127)\npatch_byte(67108921, 2)\npatch_byte(67108922, 12)\npatch_byte(67108923, 107)\npatch_byte(67108924, 127)\npatch_byte(67108925, 2)\npatch_byte(67108926, 16)\npatch_byte(67108927, 107)\npatch_byte(67108928, 127)\npatch_byte(67108929, 2)\npatch_byte(67108930, 18)\npatch_byte(67108931, 107)\npatch_byte(67108932, 127)\npatch_byte(67108933, 2)\npatch_byte(67108934, 2)\npatch_byte(67108935, 107)\npatch_byte(67108936, 135)\npatch_byte(67108937, 129)\npatch_byte(67108938, 186)\npatch_byte(67108939, 82)\npatch_byte(67108940, 135)\npatch_byte(67108941, 12)\npatch_byte(67108942, 143)\npatch_byte(67108943, 114)\npatch_byte(67108944, 244)\npatch_byte(67108945, 3)\npatch_byte(67108946, 1)\npatch_byte(67108947, 42)\npatch_byte(67108948, 245)\npatch_byte(67108949, 1)\npatch_byte(67108950, 0)\npatch_byte(67108951, 23)\npatch_byte(67108952, 224)\npatch_byte(67108953, 3)\npatch_byte(67108954, 20)\npatch_byte(67108955, 42)\npatch_byte(67108956, 243)\npatch_byte(67108957, 3)\npatch_byte(67108958, 7)\npatch_byte(67108959, 42)\npatch_byte(67108960, 127)\npatch_byte(67108961, 2)\npatch_byte(67108962, 10)\npatch_byte(67108963, 107)\npatch_byte(67108964, 127)\npatch_byte(67108965, 2)\npatch_byte(67108966, 11)\npatch_byte(67108967, 107)\npatch_byte(67108968, 127)\npatch_byte(67108969, 2)\npatch_byte(67108970, 12)\npatch_byte(67108971, 107)\npatch_byte(67108972, 127)\npatch_byte(67108973, 2)\npatch_byte(67108974, 16)\npatch_byte(67108975, 107)\npatch_byte(67108976, 127)\npatch_byte(67108977, 2)\npatch_byte(67108978, 17)\npatch_byte(67108979, 107)\npatch_byte(67108980, 231)\npatch_byte(67108981, 3)\npatch_byte(67108982, 19)\npatch_byte(67108983, 42)\npatch_byte(67108984, 244)\npatch_byte(67108985, 3)\npatch_byte(67108986, 0)\npatch_byte(67108987, 
42)\npatch_byte(67108988, 224)\npatch_byte(67108989, 1)\npatch_byte(67108990, 0)\npatch_byte(67108991, 23)\npatch_byte(1888, 231)\npatch_byte(1889, 23)\npatch_byte(1890, 159)\npatch_byte(1891, 26)\npatch_byte(1892, 47)\npatch_byte(1893, 254)\npatch_byte(1894, 255)\npatch_byte(1895, 20)\npatch_byte(67108992, 71)\npatch_byte(67108993, 0)\npatch_byte(67108994, 0)\npatch_byte(67108995, 53)\npatch_byte(67108996, 39)\npatch_byte(67108997, 1)\npatch_byte(67108998, 0)\npatch_byte(67108999, 52)\npatch_byte(67109000, 224)\npatch_byte(67109001, 3)\npatch_byte(67109002, 20)\npatch_byte(67109003, 42)\npatch_byte(67109004, 243)\npatch_byte(67109005, 3)\npatch_byte(67109006, 7)\npatch_byte(67109007, 42)\npatch_byte(67109008, 127)\npatch_byte(67109009, 2)\npatch_byte(67109010, 10)\npatch_byte(67109011, 107)\npatch_byte(67109012, 127)\npatch_byte(67109013, 2)\npatch_byte(67109014, 11)\npatch_byte(67109015, 107)\npatch_byte(67109016, 127)\npatch_byte(67109017, 2)\npatch_byte(67109018, 12)\npatch_byte(67109019, 107)\npatch_byte(67109020, 127)\npatch_byte(67109021, 2)\npatch_byte(67109022, 16)\npatch_byte(67109023, 107)\npatch_byte(67109024, 127)\npatch_byte(67109025, 2)\npatch_byte(67109026, 18)\npatch_byte(67109027, 107)\npatch_byte(67109028, 220)\npatch_byte(67109029, 1)\npatch_byte(67109030, 0)\npatch_byte(67109031, 23)\npatch_byte(67109032, 224)\npatch_byte(67109033, 3)\npatch_byte(67109034, 20)\npatch_byte(67109035, 42)\npatch_byte(67109036, 243)\npatch_byte(67109037, 3)\npatch_byte(67109038, 7)\npatch_byte(67109039, 42)\npatch_byte(67109040, 127)\npatch_byte(67109041, 2)\npatch_byte(67109042, 10)\npatch_byte(67109043, 107)\npatch_byte(67109044, 127)\npatch_byte(67109045, 2)\npatch_byte(67109046, 11)\npatch_byte(67109047, 107)\npatch_byte(67109048, 127)\npatch_byte(67109049, 2)\npatch_byte(67109050, 12)\npatch_byte(67109051, 107)\npatch_byte(67109052, 127)\npatch_byte(67109053, 2)\npatch_byte(67109054, 13)\npatch_byte(67109055, 107)\npatch_byte(67109056, 231)\npatch_byte(67109057, 3)\npatch_byte(67109058, 19)\npatch_byte(67109059, 42)\npatch_byte(67109060, 244)\npatch_byte(67109061, 3)\npatch_byte(67109062, 0)\npatch_byte(67109063, 42)\npatch_byte(67109064, 180)\npatch_byte(67109065, 1)\npatch_byte(67109066, 0)\npatch_byte(67109067, 23)\npatch_byte(2008, 231)\npatch_byte(2009, 23)\npatch_byte(2010, 159)\npatch_byte(2011, 26)\npatch_byte(2016, 40)\npatch_byte(2017, 254)\npatch_byte(2018, 255)\npatch_byte(2019, 20)\npatch_byte(67109068, 224)\npatch_byte(67109069, 3)\npatch_byte(67109070, 20)\npatch_byte(67109071, 42)\npatch_byte(67109072, 243)\npatch_byte(67109073, 3)\npatch_byte(67109074, 7)\npatch_byte(67109075, 42)\npatch_byte(67109076, 127)\npatch_byte(67109077, 2)\npatch_byte(67109078, 10)\npatch_byte(67109079, 107)\npatch_byte(67109080, 127)\npatch_byte(67109081, 2)\npatch_byte(67109082, 11)\npatch_byte(67109083, 107)\npatch_byte(67109084, 127)\npatch_byte(67109085, 2)\npatch_byte(67109086, 12)\npatch_byte(67109087, 107)\npatch_byte(67109088, 127)\npatch_byte(67109089, 2)\npatch_byte(67109090, 16)\npatch_byte(67109091, 107)\npatch_byte(67109092, 127)\npatch_byte(67109093, 2)\npatch_byte(67109094, 18)\npatch_byte(67109095, 107)\npatch_byte(67109096, 127)\npatch_byte(67109097, 2)\npatch_byte(67109098, 2)\npatch_byte(67109099, 107)\npatch_byte(67109100, 135)\npatch_byte(67109101, 129)\npatch_byte(67109102, 186)\npatch_byte(67109103, 82)\npatch_byte(67109104, 135)\npatch_byte(67109105, 12)\npatch_byte(67109106, 143)\npatch_byte(67109107, 114)\npatch_byte(67109108, 244)\npatch_byte(67109109, 
3)\npatch_byte(67109110, 1)\npatch_byte(67109111, 42)\npatch_byte(67109112, 127)\npatch_byte(67109113, 2)\npatch_byte(67109114, 4)\npatch_byte(67109115, 107)\npatch_byte(67109116, 135)\npatch_byte(67109117, 129)\npatch_byte(67109118, 186)\npatch_byte(67109119, 82)\npatch_byte(67109120, 135)\npatch_byte(67109121, 12)\npatch_byte(67109122, 143)\npatch_byte(67109123, 114)\npatch_byte(67109124, 244)\npatch_byte(67109125, 3)\npatch_byte(67109126, 5)\npatch_byte(67109127, 42)\npatch_byte(67109128, 127)\npatch_byte(67109129, 2)\npatch_byte(67109130, 9)\npatch_byte(67109131, 107)\npatch_byte(67109132, 231)\npatch_byte(67109133, 3)\npatch_byte(67109134, 19)\npatch_byte(67109135, 42)\npatch_byte(67109136, 244)\npatch_byte(67109137, 3)\npatch_byte(67109138, 0)\npatch_byte(67109139, 42)\npatch_byte(67109140, 206)\npatch_byte(67109141, 1)\npatch_byte(67109142, 0)\npatch_byte(67109143, 23)\npatch_byte(2084, 42)\npatch_byte(2085, 254)\npatch_byte(2086, 255)\npatch_byte(2087, 20)\npatch_byte(67109144, 224)\npatch_byte(67109145, 3)\npatch_byte(67109146, 20)\npatch_byte(67109147, 42)\npatch_byte(67109148, 243)\npatch_byte(67109149, 3)\npatch_byte(67109150, 7)\npatch_byte(67109151, 42)\npatch_byte(67109152, 127)\npatch_byte(67109153, 2)\npatch_byte(67109154, 10)\npatch_byte(67109155, 107)\npatch_byte(67109156, 127)\npatch_byte(67109157, 2)\npatch_byte(67109158, 11)\npatch_byte(67109159, 107)\npatch_byte(67109160, 127)\npatch_byte(67109161, 2)\npatch_byte(67109162, 12)\npatch_byte(67109163, 107)\npatch_byte(67109164, 127)\npatch_byte(67109165, 2)\npatch_byte(67109166, 16)\npatch_byte(67109167, 107)\npatch_byte(67109168, 127)\npatch_byte(67109169, 2)\npatch_byte(67109170, 18)\npatch_byte(67109171, 107)\npatch_byte(67109172, 127)\npatch_byte(67109173, 2)\npatch_byte(67109174, 2)\npatch_byte(67109175, 107)\npatch_byte(67109176, 135)\npatch_byte(67109177, 129)\npatch_byte(67109178, 186)\npatch_byte(67109179, 82)\npatch_byte(67109180, 135)\npatch_byte(67109181, 12)\npatch_byte(67109182, 143)\npatch_byte(67109183, 114)\npatch_byte(67109184, 244)\npatch_byte(67109185, 3)\npatch_byte(67109186, 1)\npatch_byte(67109187, 42)\npatch_byte(67109188, 127)\npatch_byte(67109189, 2)\npatch_byte(67109190, 4)\npatch_byte(67109191, 107)\npatch_byte(67109192, 135)\npatch_byte(67109193, 129)\npatch_byte(67109194, 186)\npatch_byte(67109195, 82)\npatch_byte(67109196, 135)\npatch_byte(67109197, 12)\npatch_byte(67109198, 143)\npatch_byte(67109199, 114)\npatch_byte(67109200, 244)\npatch_byte(67109201, 3)\npatch_byte(67109202, 5)\npatch_byte(67109203, 42)\npatch_byte(67109204, 127)\npatch_byte(67109205, 2)\npatch_byte(67109206, 9)\npatch_byte(67109207, 107)\npatch_byte(67109208, 231)\npatch_byte(67109209, 3)\npatch_byte(67109210, 19)\npatch_byte(67109211, 42)\npatch_byte(67109212, 244)\npatch_byte(67109213, 3)\npatch_byte(67109214, 0)\npatch_byte(67109215, 42)\npatch_byte(67109216, 187)\npatch_byte(67109217, 1)\npatch_byte(67109218, 0)\npatch_byte(67109219, 23)\npatch_byte(2056, 68)\npatch_byte(2057, 254)\npatch_byte(2058, 255)\npatch_byte(2059, 20)\npatch_byte(67109220, 71)\npatch_byte(67109221, 0)\npatch_byte(67109222, 0)\npatch_byte(67109223, 53)\npatch_byte(67109224, 135)\npatch_byte(67109225, 1)\npatch_byte(67109226, 0)\npatch_byte(67109227, 52)\npatch_byte(67109228, 224)\npatch_byte(67109229, 3)\npatch_byte(67109230, 20)\npatch_byte(67109231, 42)\npatch_byte(67109232, 243)\npatch_byte(67109233, 3)\npatch_byte(67109234, 7)\npatch_byte(67109235, 42)\npatch_byte(67109236, 127)\npatch_byte(67109237, 2)\npatch_byte(67109238, 
10)\npatch_byte(67109239, 107)\npatch_byte(67109240, 127)\npatch_byte(67109241, 2)\npatch_byte(67109242, 11)\npatch_byte(67109243, 107)\npatch_byte(67109244, 127)\npatch_byte(67109245, 2)\npatch_byte(67109246, 12)\npatch_byte(67109247, 107)\npatch_byte(67109248, 127)\npatch_byte(67109249, 2)\npatch_byte(67109250, 16)\npatch_byte(67109251, 107)\npatch_byte(67109252, 127)\npatch_byte(67109253, 2)\npatch_byte(67109254, 18)\npatch_byte(67109255, 107)\npatch_byte(67109256, 127)\npatch_byte(67109257, 2)\npatch_byte(67109258, 15)\npatch_byte(67109259, 107)\npatch_byte(67109260, 231)\npatch_byte(67109261, 3)\npatch_byte(67109262, 19)\npatch_byte(67109263, 42)\npatch_byte(67109264, 244)\npatch_byte(67109265, 3)\npatch_byte(67109266, 0)\npatch_byte(67109267, 42)\npatch_byte(67109268, 179)\npatch_byte(67109269, 1)\npatch_byte(67109270, 0)\npatch_byte(67109271, 23)\npatch_byte(67109272, 224)\npatch_byte(67109273, 3)\npatch_byte(67109274, 20)\npatch_byte(67109275, 42)\npatch_byte(67109276, 243)\npatch_byte(67109277, 3)\npatch_byte(67109278, 7)\npatch_byte(67109279, 42)\npatch_byte(67109280, 127)\npatch_byte(67109281, 2)\npatch_byte(67109282, 10)\npatch_byte(67109283, 107)\npatch_byte(67109284, 127)\npatch_byte(67109285, 2)\npatch_byte(67109286, 14)\npatch_byte(67109287, 107)\npatch_byte(67109288, 231)\npatch_byte(67109289, 3)\npatch_byte(67109290, 19)\npatch_byte(67109291, 42)\npatch_byte(67109292, 244)\npatch_byte(67109293, 3)\npatch_byte(67109294, 0)\npatch_byte(67109295, 42)\npatch_byte(67109296, 131)\npatch_byte(67109297, 1)\npatch_byte(67109298, 0)\npatch_byte(67109299, 23)\npatch_byte(1952, 231)\npatch_byte(1953, 23)\npatch_byte(1954, 159)\npatch_byte(1955, 26)\npatch_byte(1960, 111)\npatch_byte(1961, 254)\npatch_byte(1962, 255)\npatch_byte(1963, 20)\npatch_byte(67109300, 224)\npatch_byte(67109301, 3)\npatch_byte(67109302, 20)\npatch_byte(67109303, 42)\npatch_byte(67109304, 243)\npatch_byte(67109305, 3)\npatch_byte(67109306, 7)\npatch_byte(67109307, 42)\npatch_byte(67109308, 127)\npatch_byte(67109309, 2)\npatch_byte(67109310, 10)\npatch_byte(67109311, 107)\npatch_byte(67109312, 127)\npatch_byte(67109313, 2)\npatch_byte(67109314, 11)\npatch_byte(67109315, 107)\npatch_byte(67109316, 127)\npatch_byte(67109317, 2)\npatch_byte(67109318, 12)\npatch_byte(67109319, 107)\npatch_byte(67109320, 127)\npatch_byte(67109321, 2)\npatch_byte(67109322, 16)\npatch_byte(67109323, 107)\npatch_byte(67109324, 127)\npatch_byte(67109325, 2)\npatch_byte(67109326, 18)\npatch_byte(67109327, 107)\npatch_byte(67109328, 127)\npatch_byte(67109329, 2)\npatch_byte(67109330, 2)\npatch_byte(67109331, 107)\npatch_byte(67109332, 135)\npatch_byte(67109333, 129)\npatch_byte(67109334, 186)\npatch_byte(67109335, 82)\npatch_byte(67109336, 135)\npatch_byte(67109337, 12)\npatch_byte(67109338, 143)\npatch_byte(67109339, 114)\npatch_byte(67109340, 244)\npatch_byte(67109341, 3)\npatch_byte(67109342, 1)\npatch_byte(67109343, 42)\npatch_byte(67109344, 127)\npatch_byte(67109345, 2)\npatch_byte(67109346, 4)\npatch_byte(67109347, 107)\npatch_byte(67109348, 135)\npatch_byte(67109349, 129)\npatch_byte(67109350, 186)\npatch_byte(67109351, 82)\npatch_byte(67109352, 135)\npatch_byte(67109353, 12)\npatch_byte(67109354, 143)\npatch_byte(67109355, 114)\npatch_byte(67109356, 244)\npatch_byte(67109357, 3)\npatch_byte(67109358, 5)\npatch_byte(67109359, 42)\npatch_byte(67109360, 127)\npatch_byte(67109361, 2)\npatch_byte(67109362, 9)\npatch_byte(67109363, 107)\npatch_byte(67109364, 231)\npatch_byte(67109365, 3)\npatch_byte(67109366, 19)\npatch_byte(67109367, 
42)\npatch_byte(67109368, 244)\npatch_byte(67109369, 3)\npatch_byte(67109370, 0)\npatch_byte(67109371, 42)\npatch_byte(67109372, 148)\npatch_byte(67109373, 1)\npatch_byte(67109374, 0)\npatch_byte(67109375, 23)\npatch_byte(2156, 82)\npatch_byte(2157, 254)\npatch_byte(2158, 255)\npatch_byte(2159, 20)\npatch_byte(67109376, 224)\npatch_byte(67109377, 3)\npatch_byte(67109378, 20)\npatch_byte(67109379, 42)\npatch_byte(67109380, 243)\npatch_byte(67109381, 3)\npatch_byte(67109382, 7)\npatch_byte(67109383, 42)\npatch_byte(67109384, 127)\npatch_byte(67109385, 2)\npatch_byte(67109386, 10)\npatch_byte(67109387, 107)\npatch_byte(67109388, 127)\npatch_byte(67109389, 2)\npatch_byte(67109390, 11)\npatch_byte(67109391, 107)\npatch_byte(67109392, 127)\npatch_byte(67109393, 2)\npatch_byte(67109394, 12)\npatch_byte(67109395, 107)\npatch_byte(67109396, 127)\npatch_byte(67109397, 2)\npatch_byte(67109398, 16)\npatch_byte(67109399, 107)\npatch_byte(67109400, 127)\npatch_byte(67109401, 2)\npatch_byte(67109402, 18)\npatch_byte(67109403, 107)\npatch_byte(67109404, 127)\npatch_byte(67109405, 2)\npatch_byte(67109406, 2)\npatch_byte(67109407, 107)\npatch_byte(67109408, 135)\npatch_byte(67109409, 129)\npatch_byte(67109410, 186)\npatch_byte(67109411, 82)\npatch_byte(67109412, 135)\npatch_byte(67109413, 12)\npatch_byte(67109414, 143)\npatch_byte(67109415, 114)\npatch_byte(67109416, 244)\npatch_byte(67109417, 3)\npatch_byte(67109418, 1)\npatch_byte(67109419, 42)\npatch_byte(67109420, 127)\npatch_byte(67109421, 2)\npatch_byte(67109422, 4)\npatch_byte(67109423, 107)\npatch_byte(67109424, 135)\npatch_byte(67109425, 129)\npatch_byte(67109426, 186)\npatch_byte(67109427, 82)\npatch_byte(67109428, 135)\npatch_byte(67109429, 12)\npatch_byte(67109430, 143)\npatch_byte(67109431, 114)\npatch_byte(67109432, 244)\npatch_byte(67109433, 3)\npatch_byte(67109434, 5)\npatch_byte(67109435, 42)\npatch_byte(67109436, 127)\npatch_byte(67109437, 2)\npatch_byte(67109438, 9)\npatch_byte(67109439, 107)\npatch_byte(67109440, 231)\npatch_byte(67109441, 3)\npatch_byte(67109442, 19)\npatch_byte(67109443, 42)\npatch_byte(67109444, 244)\npatch_byte(67109445, 3)\npatch_byte(67109446, 0)\npatch_byte(67109447, 42)\npatch_byte(67109448, 129)\npatch_byte(67109449, 1)\npatch_byte(67109450, 0)\npatch_byte(67109451, 23)\npatch_byte(2104, 114)\npatch_byte(2105, 254)\npatch_byte(2106, 255)\npatch_byte(2107, 20)\n",
"<import token>\n<assignment token>\nadd_segm_ex(base, base + seg_size, 1, 2, 1, 2, ADDSEG_NOSREG)\nset_segm_name(base, 'patch')\nset_segm_class(base, 'CODE')\nset_segm_type(base, 2)\npatch_byte(67108864, 224)\npatch_byte(67108865, 3)\npatch_byte(67108866, 20)\npatch_byte(67108867, 42)\npatch_byte(67108868, 243)\npatch_byte(67108869, 3)\npatch_byte(67108870, 7)\npatch_byte(67108871, 42)\npatch_byte(67108872, 127)\npatch_byte(67108873, 2)\npatch_byte(67108874, 10)\npatch_byte(67108875, 107)\npatch_byte(67108876, 127)\npatch_byte(67108877, 2)\npatch_byte(67108878, 11)\npatch_byte(67108879, 107)\npatch_byte(67108880, 127)\npatch_byte(67108881, 2)\npatch_byte(67108882, 8)\npatch_byte(67108883, 107)\npatch_byte(67108884, 231)\npatch_byte(67108885, 3)\npatch_byte(67108886, 19)\npatch_byte(67108887, 42)\npatch_byte(67108888, 244)\npatch_byte(67108889, 3)\npatch_byte(67108890, 0)\npatch_byte(67108891, 42)\npatch_byte(67108892, 237)\npatch_byte(67108893, 1)\npatch_byte(67108894, 0)\npatch_byte(67108895, 23)\npatch_byte(1876, 43)\npatch_byte(1877, 254)\npatch_byte(1878, 255)\npatch_byte(1879, 20)\npatch_byte(67108896, 71)\npatch_byte(67108897, 0)\npatch_byte(67108898, 0)\npatch_byte(67108899, 53)\npatch_byte(67108900, 167)\npatch_byte(67108901, 1)\npatch_byte(67108902, 0)\npatch_byte(67108903, 52)\npatch_byte(67108904, 224)\npatch_byte(67108905, 3)\npatch_byte(67108906, 20)\npatch_byte(67108907, 42)\npatch_byte(67108908, 243)\npatch_byte(67108909, 3)\npatch_byte(67108910, 7)\npatch_byte(67108911, 42)\npatch_byte(67108912, 127)\npatch_byte(67108913, 2)\npatch_byte(67108914, 10)\npatch_byte(67108915, 107)\npatch_byte(67108916, 127)\npatch_byte(67108917, 2)\npatch_byte(67108918, 11)\npatch_byte(67108919, 107)\npatch_byte(67108920, 127)\npatch_byte(67108921, 2)\npatch_byte(67108922, 12)\npatch_byte(67108923, 107)\npatch_byte(67108924, 127)\npatch_byte(67108925, 2)\npatch_byte(67108926, 16)\npatch_byte(67108927, 107)\npatch_byte(67108928, 127)\npatch_byte(67108929, 2)\npatch_byte(67108930, 18)\npatch_byte(67108931, 107)\npatch_byte(67108932, 127)\npatch_byte(67108933, 2)\npatch_byte(67108934, 2)\npatch_byte(67108935, 107)\npatch_byte(67108936, 135)\npatch_byte(67108937, 129)\npatch_byte(67108938, 186)\npatch_byte(67108939, 82)\npatch_byte(67108940, 135)\npatch_byte(67108941, 12)\npatch_byte(67108942, 143)\npatch_byte(67108943, 114)\npatch_byte(67108944, 244)\npatch_byte(67108945, 3)\npatch_byte(67108946, 1)\npatch_byte(67108947, 42)\npatch_byte(67108948, 245)\npatch_byte(67108949, 1)\npatch_byte(67108950, 0)\npatch_byte(67108951, 23)\npatch_byte(67108952, 224)\npatch_byte(67108953, 3)\npatch_byte(67108954, 20)\npatch_byte(67108955, 42)\npatch_byte(67108956, 243)\npatch_byte(67108957, 3)\npatch_byte(67108958, 7)\npatch_byte(67108959, 42)\npatch_byte(67108960, 127)\npatch_byte(67108961, 2)\npatch_byte(67108962, 10)\npatch_byte(67108963, 107)\npatch_byte(67108964, 127)\npatch_byte(67108965, 2)\npatch_byte(67108966, 11)\npatch_byte(67108967, 107)\npatch_byte(67108968, 127)\npatch_byte(67108969, 2)\npatch_byte(67108970, 12)\npatch_byte(67108971, 107)\npatch_byte(67108972, 127)\npatch_byte(67108973, 2)\npatch_byte(67108974, 16)\npatch_byte(67108975, 107)\npatch_byte(67108976, 127)\npatch_byte(67108977, 2)\npatch_byte(67108978, 17)\npatch_byte(67108979, 107)\npatch_byte(67108980, 231)\npatch_byte(67108981, 3)\npatch_byte(67108982, 19)\npatch_byte(67108983, 42)\npatch_byte(67108984, 244)\npatch_byte(67108985, 3)\npatch_byte(67108986, 0)\npatch_byte(67108987, 42)\npatch_byte(67108988, 224)\npatch_byte(67108989, 
1)\npatch_byte(67108990, 0)\npatch_byte(67108991, 23)\npatch_byte(1888, 231)\npatch_byte(1889, 23)\npatch_byte(1890, 159)\npatch_byte(1891, 26)\npatch_byte(1892, 47)\npatch_byte(1893, 254)\npatch_byte(1894, 255)\npatch_byte(1895, 20)\npatch_byte(67108992, 71)\npatch_byte(67108993, 0)\npatch_byte(67108994, 0)\npatch_byte(67108995, 53)\npatch_byte(67108996, 39)\npatch_byte(67108997, 1)\npatch_byte(67108998, 0)\npatch_byte(67108999, 52)\npatch_byte(67109000, 224)\npatch_byte(67109001, 3)\npatch_byte(67109002, 20)\npatch_byte(67109003, 42)\npatch_byte(67109004, 243)\npatch_byte(67109005, 3)\npatch_byte(67109006, 7)\npatch_byte(67109007, 42)\npatch_byte(67109008, 127)\npatch_byte(67109009, 2)\npatch_byte(67109010, 10)\npatch_byte(67109011, 107)\npatch_byte(67109012, 127)\npatch_byte(67109013, 2)\npatch_byte(67109014, 11)\npatch_byte(67109015, 107)\npatch_byte(67109016, 127)\npatch_byte(67109017, 2)\npatch_byte(67109018, 12)\npatch_byte(67109019, 107)\npatch_byte(67109020, 127)\npatch_byte(67109021, 2)\npatch_byte(67109022, 16)\npatch_byte(67109023, 107)\npatch_byte(67109024, 127)\npatch_byte(67109025, 2)\npatch_byte(67109026, 18)\npatch_byte(67109027, 107)\npatch_byte(67109028, 220)\npatch_byte(67109029, 1)\npatch_byte(67109030, 0)\npatch_byte(67109031, 23)\npatch_byte(67109032, 224)\npatch_byte(67109033, 3)\npatch_byte(67109034, 20)\npatch_byte(67109035, 42)\npatch_byte(67109036, 243)\npatch_byte(67109037, 3)\npatch_byte(67109038, 7)\npatch_byte(67109039, 42)\npatch_byte(67109040, 127)\npatch_byte(67109041, 2)\npatch_byte(67109042, 10)\npatch_byte(67109043, 107)\npatch_byte(67109044, 127)\npatch_byte(67109045, 2)\npatch_byte(67109046, 11)\npatch_byte(67109047, 107)\npatch_byte(67109048, 127)\npatch_byte(67109049, 2)\npatch_byte(67109050, 12)\npatch_byte(67109051, 107)\npatch_byte(67109052, 127)\npatch_byte(67109053, 2)\npatch_byte(67109054, 13)\npatch_byte(67109055, 107)\npatch_byte(67109056, 231)\npatch_byte(67109057, 3)\npatch_byte(67109058, 19)\npatch_byte(67109059, 42)\npatch_byte(67109060, 244)\npatch_byte(67109061, 3)\npatch_byte(67109062, 0)\npatch_byte(67109063, 42)\npatch_byte(67109064, 180)\npatch_byte(67109065, 1)\npatch_byte(67109066, 0)\npatch_byte(67109067, 23)\npatch_byte(2008, 231)\npatch_byte(2009, 23)\npatch_byte(2010, 159)\npatch_byte(2011, 26)\npatch_byte(2016, 40)\npatch_byte(2017, 254)\npatch_byte(2018, 255)\npatch_byte(2019, 20)\npatch_byte(67109068, 224)\npatch_byte(67109069, 3)\npatch_byte(67109070, 20)\npatch_byte(67109071, 42)\npatch_byte(67109072, 243)\npatch_byte(67109073, 3)\npatch_byte(67109074, 7)\npatch_byte(67109075, 42)\npatch_byte(67109076, 127)\npatch_byte(67109077, 2)\npatch_byte(67109078, 10)\npatch_byte(67109079, 107)\npatch_byte(67109080, 127)\npatch_byte(67109081, 2)\npatch_byte(67109082, 11)\npatch_byte(67109083, 107)\npatch_byte(67109084, 127)\npatch_byte(67109085, 2)\npatch_byte(67109086, 12)\npatch_byte(67109087, 107)\npatch_byte(67109088, 127)\npatch_byte(67109089, 2)\npatch_byte(67109090, 16)\npatch_byte(67109091, 107)\npatch_byte(67109092, 127)\npatch_byte(67109093, 2)\npatch_byte(67109094, 18)\npatch_byte(67109095, 107)\npatch_byte(67109096, 127)\npatch_byte(67109097, 2)\npatch_byte(67109098, 2)\npatch_byte(67109099, 107)\npatch_byte(67109100, 135)\npatch_byte(67109101, 129)\npatch_byte(67109102, 186)\npatch_byte(67109103, 82)\npatch_byte(67109104, 135)\npatch_byte(67109105, 12)\npatch_byte(67109106, 143)\npatch_byte(67109107, 114)\npatch_byte(67109108, 244)\npatch_byte(67109109, 3)\npatch_byte(67109110, 1)\npatch_byte(67109111, 
42)\npatch_byte(67109112, 127)\npatch_byte(67109113, 2)\npatch_byte(67109114, 4)\npatch_byte(67109115, 107)\npatch_byte(67109116, 135)\npatch_byte(67109117, 129)\npatch_byte(67109118, 186)\npatch_byte(67109119, 82)\npatch_byte(67109120, 135)\npatch_byte(67109121, 12)\npatch_byte(67109122, 143)\npatch_byte(67109123, 114)\npatch_byte(67109124, 244)\npatch_byte(67109125, 3)\npatch_byte(67109126, 5)\npatch_byte(67109127, 42)\npatch_byte(67109128, 127)\npatch_byte(67109129, 2)\npatch_byte(67109130, 9)\npatch_byte(67109131, 107)\npatch_byte(67109132, 231)\npatch_byte(67109133, 3)\npatch_byte(67109134, 19)\npatch_byte(67109135, 42)\npatch_byte(67109136, 244)\npatch_byte(67109137, 3)\npatch_byte(67109138, 0)\npatch_byte(67109139, 42)\npatch_byte(67109140, 206)\npatch_byte(67109141, 1)\npatch_byte(67109142, 0)\npatch_byte(67109143, 23)\npatch_byte(2084, 42)\npatch_byte(2085, 254)\npatch_byte(2086, 255)\npatch_byte(2087, 20)\npatch_byte(67109144, 224)\npatch_byte(67109145, 3)\npatch_byte(67109146, 20)\npatch_byte(67109147, 42)\npatch_byte(67109148, 243)\npatch_byte(67109149, 3)\npatch_byte(67109150, 7)\npatch_byte(67109151, 42)\npatch_byte(67109152, 127)\npatch_byte(67109153, 2)\npatch_byte(67109154, 10)\npatch_byte(67109155, 107)\npatch_byte(67109156, 127)\npatch_byte(67109157, 2)\npatch_byte(67109158, 11)\npatch_byte(67109159, 107)\npatch_byte(67109160, 127)\npatch_byte(67109161, 2)\npatch_byte(67109162, 12)\npatch_byte(67109163, 107)\npatch_byte(67109164, 127)\npatch_byte(67109165, 2)\npatch_byte(67109166, 16)\npatch_byte(67109167, 107)\npatch_byte(67109168, 127)\npatch_byte(67109169, 2)\npatch_byte(67109170, 18)\npatch_byte(67109171, 107)\npatch_byte(67109172, 127)\npatch_byte(67109173, 2)\npatch_byte(67109174, 2)\npatch_byte(67109175, 107)\npatch_byte(67109176, 135)\npatch_byte(67109177, 129)\npatch_byte(67109178, 186)\npatch_byte(67109179, 82)\npatch_byte(67109180, 135)\npatch_byte(67109181, 12)\npatch_byte(67109182, 143)\npatch_byte(67109183, 114)\npatch_byte(67109184, 244)\npatch_byte(67109185, 3)\npatch_byte(67109186, 1)\npatch_byte(67109187, 42)\npatch_byte(67109188, 127)\npatch_byte(67109189, 2)\npatch_byte(67109190, 4)\npatch_byte(67109191, 107)\npatch_byte(67109192, 135)\npatch_byte(67109193, 129)\npatch_byte(67109194, 186)\npatch_byte(67109195, 82)\npatch_byte(67109196, 135)\npatch_byte(67109197, 12)\npatch_byte(67109198, 143)\npatch_byte(67109199, 114)\npatch_byte(67109200, 244)\npatch_byte(67109201, 3)\npatch_byte(67109202, 5)\npatch_byte(67109203, 42)\npatch_byte(67109204, 127)\npatch_byte(67109205, 2)\npatch_byte(67109206, 9)\npatch_byte(67109207, 107)\npatch_byte(67109208, 231)\npatch_byte(67109209, 3)\npatch_byte(67109210, 19)\npatch_byte(67109211, 42)\npatch_byte(67109212, 244)\npatch_byte(67109213, 3)\npatch_byte(67109214, 0)\npatch_byte(67109215, 42)\npatch_byte(67109216, 187)\npatch_byte(67109217, 1)\npatch_byte(67109218, 0)\npatch_byte(67109219, 23)\npatch_byte(2056, 68)\npatch_byte(2057, 254)\npatch_byte(2058, 255)\npatch_byte(2059, 20)\npatch_byte(67109220, 71)\npatch_byte(67109221, 0)\npatch_byte(67109222, 0)\npatch_byte(67109223, 53)\npatch_byte(67109224, 135)\npatch_byte(67109225, 1)\npatch_byte(67109226, 0)\npatch_byte(67109227, 52)\npatch_byte(67109228, 224)\npatch_byte(67109229, 3)\npatch_byte(67109230, 20)\npatch_byte(67109231, 42)\npatch_byte(67109232, 243)\npatch_byte(67109233, 3)\npatch_byte(67109234, 7)\npatch_byte(67109235, 42)\npatch_byte(67109236, 127)\npatch_byte(67109237, 2)\npatch_byte(67109238, 10)\npatch_byte(67109239, 107)\npatch_byte(67109240, 
127)\npatch_byte(67109241, 2)\npatch_byte(67109242, 11)\npatch_byte(67109243, 107)\npatch_byte(67109244, 127)\npatch_byte(67109245, 2)\npatch_byte(67109246, 12)\npatch_byte(67109247, 107)\npatch_byte(67109248, 127)\npatch_byte(67109249, 2)\npatch_byte(67109250, 16)\npatch_byte(67109251, 107)\npatch_byte(67109252, 127)\npatch_byte(67109253, 2)\npatch_byte(67109254, 18)\npatch_byte(67109255, 107)\npatch_byte(67109256, 127)\npatch_byte(67109257, 2)\npatch_byte(67109258, 15)\npatch_byte(67109259, 107)\npatch_byte(67109260, 231)\npatch_byte(67109261, 3)\npatch_byte(67109262, 19)\npatch_byte(67109263, 42)\npatch_byte(67109264, 244)\npatch_byte(67109265, 3)\npatch_byte(67109266, 0)\npatch_byte(67109267, 42)\npatch_byte(67109268, 179)\npatch_byte(67109269, 1)\npatch_byte(67109270, 0)\npatch_byte(67109271, 23)\npatch_byte(67109272, 224)\npatch_byte(67109273, 3)\npatch_byte(67109274, 20)\npatch_byte(67109275, 42)\npatch_byte(67109276, 243)\npatch_byte(67109277, 3)\npatch_byte(67109278, 7)\npatch_byte(67109279, 42)\npatch_byte(67109280, 127)\npatch_byte(67109281, 2)\npatch_byte(67109282, 10)\npatch_byte(67109283, 107)\npatch_byte(67109284, 127)\npatch_byte(67109285, 2)\npatch_byte(67109286, 14)\npatch_byte(67109287, 107)\npatch_byte(67109288, 231)\npatch_byte(67109289, 3)\npatch_byte(67109290, 19)\npatch_byte(67109291, 42)\npatch_byte(67109292, 244)\npatch_byte(67109293, 3)\npatch_byte(67109294, 0)\npatch_byte(67109295, 42)\npatch_byte(67109296, 131)\npatch_byte(67109297, 1)\npatch_byte(67109298, 0)\npatch_byte(67109299, 23)\npatch_byte(1952, 231)\npatch_byte(1953, 23)\npatch_byte(1954, 159)\npatch_byte(1955, 26)\npatch_byte(1960, 111)\npatch_byte(1961, 254)\npatch_byte(1962, 255)\npatch_byte(1963, 20)\npatch_byte(67109300, 224)\npatch_byte(67109301, 3)\npatch_byte(67109302, 20)\npatch_byte(67109303, 42)\npatch_byte(67109304, 243)\npatch_byte(67109305, 3)\npatch_byte(67109306, 7)\npatch_byte(67109307, 42)\npatch_byte(67109308, 127)\npatch_byte(67109309, 2)\npatch_byte(67109310, 10)\npatch_byte(67109311, 107)\npatch_byte(67109312, 127)\npatch_byte(67109313, 2)\npatch_byte(67109314, 11)\npatch_byte(67109315, 107)\npatch_byte(67109316, 127)\npatch_byte(67109317, 2)\npatch_byte(67109318, 12)\npatch_byte(67109319, 107)\npatch_byte(67109320, 127)\npatch_byte(67109321, 2)\npatch_byte(67109322, 16)\npatch_byte(67109323, 107)\npatch_byte(67109324, 127)\npatch_byte(67109325, 2)\npatch_byte(67109326, 18)\npatch_byte(67109327, 107)\npatch_byte(67109328, 127)\npatch_byte(67109329, 2)\npatch_byte(67109330, 2)\npatch_byte(67109331, 107)\npatch_byte(67109332, 135)\npatch_byte(67109333, 129)\npatch_byte(67109334, 186)\npatch_byte(67109335, 82)\npatch_byte(67109336, 135)\npatch_byte(67109337, 12)\npatch_byte(67109338, 143)\npatch_byte(67109339, 114)\npatch_byte(67109340, 244)\npatch_byte(67109341, 3)\npatch_byte(67109342, 1)\npatch_byte(67109343, 42)\npatch_byte(67109344, 127)\npatch_byte(67109345, 2)\npatch_byte(67109346, 4)\npatch_byte(67109347, 107)\npatch_byte(67109348, 135)\npatch_byte(67109349, 129)\npatch_byte(67109350, 186)\npatch_byte(67109351, 82)\npatch_byte(67109352, 135)\npatch_byte(67109353, 12)\npatch_byte(67109354, 143)\npatch_byte(67109355, 114)\npatch_byte(67109356, 244)\npatch_byte(67109357, 3)\npatch_byte(67109358, 5)\npatch_byte(67109359, 42)\npatch_byte(67109360, 127)\npatch_byte(67109361, 2)\npatch_byte(67109362, 9)\npatch_byte(67109363, 107)\npatch_byte(67109364, 231)\npatch_byte(67109365, 3)\npatch_byte(67109366, 19)\npatch_byte(67109367, 42)\npatch_byte(67109368, 244)\npatch_byte(67109369, 
3)\npatch_byte(67109370, 0)\npatch_byte(67109371, 42)\npatch_byte(67109372, 148)\npatch_byte(67109373, 1)\npatch_byte(67109374, 0)\npatch_byte(67109375, 23)\npatch_byte(2156, 82)\npatch_byte(2157, 254)\npatch_byte(2158, 255)\npatch_byte(2159, 20)\npatch_byte(67109376, 224)\npatch_byte(67109377, 3)\npatch_byte(67109378, 20)\npatch_byte(67109379, 42)\npatch_byte(67109380, 243)\npatch_byte(67109381, 3)\npatch_byte(67109382, 7)\npatch_byte(67109383, 42)\npatch_byte(67109384, 127)\npatch_byte(67109385, 2)\npatch_byte(67109386, 10)\npatch_byte(67109387, 107)\npatch_byte(67109388, 127)\npatch_byte(67109389, 2)\npatch_byte(67109390, 11)\npatch_byte(67109391, 107)\npatch_byte(67109392, 127)\npatch_byte(67109393, 2)\npatch_byte(67109394, 12)\npatch_byte(67109395, 107)\npatch_byte(67109396, 127)\npatch_byte(67109397, 2)\npatch_byte(67109398, 16)\npatch_byte(67109399, 107)\npatch_byte(67109400, 127)\npatch_byte(67109401, 2)\npatch_byte(67109402, 18)\npatch_byte(67109403, 107)\npatch_byte(67109404, 127)\npatch_byte(67109405, 2)\npatch_byte(67109406, 2)\npatch_byte(67109407, 107)\npatch_byte(67109408, 135)\npatch_byte(67109409, 129)\npatch_byte(67109410, 186)\npatch_byte(67109411, 82)\npatch_byte(67109412, 135)\npatch_byte(67109413, 12)\npatch_byte(67109414, 143)\npatch_byte(67109415, 114)\npatch_byte(67109416, 244)\npatch_byte(67109417, 3)\npatch_byte(67109418, 1)\npatch_byte(67109419, 42)\npatch_byte(67109420, 127)\npatch_byte(67109421, 2)\npatch_byte(67109422, 4)\npatch_byte(67109423, 107)\npatch_byte(67109424, 135)\npatch_byte(67109425, 129)\npatch_byte(67109426, 186)\npatch_byte(67109427, 82)\npatch_byte(67109428, 135)\npatch_byte(67109429, 12)\npatch_byte(67109430, 143)\npatch_byte(67109431, 114)\npatch_byte(67109432, 244)\npatch_byte(67109433, 3)\npatch_byte(67109434, 5)\npatch_byte(67109435, 42)\npatch_byte(67109436, 127)\npatch_byte(67109437, 2)\npatch_byte(67109438, 9)\npatch_byte(67109439, 107)\npatch_byte(67109440, 231)\npatch_byte(67109441, 3)\npatch_byte(67109442, 19)\npatch_byte(67109443, 42)\npatch_byte(67109444, 244)\npatch_byte(67109445, 3)\npatch_byte(67109446, 0)\npatch_byte(67109447, 42)\npatch_byte(67109448, 129)\npatch_byte(67109449, 1)\npatch_byte(67109450, 0)\npatch_byte(67109451, 23)\npatch_byte(2104, 114)\npatch_byte(2105, 254)\npatch_byte(2106, 255)\npatch_byte(2107, 20)\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
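The row above reduces an IDAPython patching script to flat per-byte calls. As a hedged sketch only (the helper name and call are illustrative, not taken from the dataset), the same `idc.patch_byte` pattern is commonly wrapped in a loop so a whole byte sequence can be written from one list:

import idc

def patch_bytes(ea, data):
    # Write each byte of `data` starting at address `ea`,
    # mirroring the long runs of idc.patch_byte(...) calls stored in the row.
    for offset, value in enumerate(data):
        idc.patch_byte(ea + offset, value)

# Illustrative only: 67108864 (0x4000000) is the segment base used in the row's code,
# and 224, 3, 20, 42 are the first four bytes it patches there.
patch_bytes(67108864, [224, 3, 20, 42])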
98,602 |
6ad01f70554c7289a9ef21bd0052bba5c326741c
|
"""
django-peavy makes it easy to collect and monitor Django application logging.
"""
VERSION = (0, 9, 0)
def get_version():
return '.'.join((str(d) for d in VERSION))
|
[
"\"\"\"\ndjango-peavy makes it easy to collect and monitor Django application logging.\n\"\"\"\nVERSION = (0, 9, 0)\n\n\ndef get_version():\n return '.'.join((str(d) for d in VERSION))\n",
"<docstring token>\nVERSION = 0, 9, 0\n\n\ndef get_version():\n return '.'.join(str(d) for d in VERSION)\n",
"<docstring token>\n<assignment token>\n\n\ndef get_version():\n return '.'.join(str(d) for d in VERSION)\n",
"<docstring token>\n<assignment token>\n<function token>\n"
] | false |
98,603 |
902b77933c6d369a13fa8963737cde0a22c0d01b
|
# This is a one line comment
# Exponentiation
exponents = 10**2
print(exponents)
"""
this is a multi line comment
modulo - returns the remainder
"""
a=100
b=3
remainder = a % b
print("Getting the modulo of " + str(a)+ " % "+ str(b) + " is just like getting the remainder integer when " + str(a) +" is being divided by " + str(b))
print("The modulo of ("+str(a) +" % "+str(b) +") is " + str(remainder))
|
[
"# This is a one line comment\r\n\r\n# Exponentiation\r\n\r\nexponents = 10**2\r\nprint(exponents)\r\n\r\n\r\n\"\"\"\r\nthis is a multi line comment\r\nmodulo - returns the remainder\r\n\"\"\"\r\na=100\r\nb=3\r\nremainder = a % b\r\nprint(\"Getting the modulo of \" + str(a)+ \" % \"+ str(b) + \" is just like getting the remainder integer when \" + str(a) +\" is being divided by \" + str(b))\r\nprint(\"The modulo of (\"+str(a) +\" % \"+str(b) +\") is \" + str(remainder))\r\n",
"exponents = 10 ** 2\nprint(exponents)\n<docstring token>\na = 100\nb = 3\nremainder = a % b\nprint('Getting the modulo of ' + str(a) + ' % ' + str(b) +\n ' is just like getting the remainder integer when ' + str(a) +\n ' is being divided by ' + str(b))\nprint('The modulo of (' + str(a) + ' % ' + str(b) + ') is ' + str(remainder))\n",
"<assignment token>\nprint(exponents)\n<docstring token>\n<assignment token>\nprint('Getting the modulo of ' + str(a) + ' % ' + str(b) +\n ' is just like getting the remainder integer when ' + str(a) +\n ' is being divided by ' + str(b))\nprint('The modulo of (' + str(a) + ' % ' + str(b) + ') is ' + str(remainder))\n",
"<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<code token>\n"
] | false |
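The modulo sample in the row above builds its output with string concatenation. As a hedged alternative sketch (not part of the original sample), `divmod` returns the quotient and the same remainder in one call, and an f-string keeps the message readable:

a = 100
b = 3
quotient, remainder = divmod(a, b)  # quotient = 33, remainder = 1, same as a % b
print(f"The modulo of ({a} % {b}) is {remainder}")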
98,604 |
3d8e1a2f51e29477e167f54e1e87b1e520f7cb68
|
from django.contrib import admin
# Register your models here.
from .models import Contract, Company, Contract_type, Currency
class ContractAdmin(admin.ModelAdmin):
list_display = ["id", "company", "contract_type", "currency_type", "date_start", "date_end", "contract_value"]
list_display_links = ["id"]
list_editable = ["company", "contract_type", "currency_type", "date_start", "date_end", "contract_value"]
list_filter = ["company", "contract_type", "currency_type", "date_start", "date_end", "contract_value"]
search_fields = ["company", "contract_type", "currency_type", "date_start", "date_end", "contract_value"]
# class Meta:
# model = Contract
admin.site.register(Contract, ContractAdmin)
class CompanyAdmin(admin.ModelAdmin):
list_display = ["id", "name"]
list_display_links = ["id"]
list_editable = ["name"]
list_filter = ["name"]
search_fields = ["name"]
class Meta:
verbose_name_plural = "companies"
admin.site.register(Company, CompanyAdmin)
class Contract_typeAdmin(admin.ModelAdmin):
list_display = ["id", "contract_type"]
list_display_links = ["id"]
list_editable = ["contract_type"]
list_filter = ["contract_type"]
search_fields = ["contract_type"]
admin.site.register(Contract_type, Contract_typeAdmin)
class CurrencyAdmin(admin.ModelAdmin):
list_display = ["id", "currency_type"]
list_display_links = ["id"]
list_editable = ["currency_type"]
list_filter = ["currency_type"]
search_fields = ["currency_type"]
class Meta:
verbose_name_plural = "companies"
admin.site.register(Currency, CurrencyAdmin)
|
[
"from django.contrib import admin\n\n# Register your models here.\nfrom .models import Contract, Company, Contract_type, Currency\n\nclass ContractAdmin(admin.ModelAdmin):\n list_display = [\"id\", \"company\", \"contract_type\", \"currency_type\", \"date_start\", \"date_end\", \"contract_value\"]\n list_display_links = [\"id\"]\n list_editable = [\"company\", \"contract_type\", \"currency_type\", \"date_start\", \"date_end\", \"contract_value\"]\n list_filter = [\"company\", \"contract_type\", \"currency_type\", \"date_start\", \"date_end\", \"contract_value\"]\n\n search_fields = [\"company\", \"contract_type\", \"currency_type\", \"date_start\", \"date_end\", \"contract_value\"]\n\n # class Meta:\n # model = Contract\n\nadmin.site.register(Contract, ContractAdmin)\n\n\nclass CompanyAdmin(admin.ModelAdmin):\n list_display = [\"id\", \"name\"]\n list_display_links = [\"id\"]\n list_editable = [\"name\"]\n list_filter = [\"name\"]\n search_fields = [\"name\"]\n\n class Meta:\n verbose_name_plural = \"companies\"\n\nadmin.site.register(Company, CompanyAdmin)\n\n\nclass Contract_typeAdmin(admin.ModelAdmin):\n list_display = [\"id\", \"contract_type\"]\n list_display_links = [\"id\"]\n list_editable = [\"contract_type\"]\n list_filter = [\"contract_type\"]\n search_fields = [\"contract_type\"]\n\nadmin.site.register(Contract_type, Contract_typeAdmin)\n\n\nclass CurrencyAdmin(admin.ModelAdmin):\n list_display = [\"id\", \"currency_type\"]\n list_display_links = [\"id\"]\n list_editable = [\"currency_type\"]\n list_filter = [\"currency_type\"]\n search_fields = [\"currency_type\"]\n\n class Meta:\n verbose_name_plural = \"companies\"\n\nadmin.site.register(Currency, CurrencyAdmin)",
"from django.contrib import admin\nfrom .models import Contract, Company, Contract_type, Currency\n\n\nclass ContractAdmin(admin.ModelAdmin):\n list_display = ['id', 'company', 'contract_type', 'currency_type',\n 'date_start', 'date_end', 'contract_value']\n list_display_links = ['id']\n list_editable = ['company', 'contract_type', 'currency_type',\n 'date_start', 'date_end', 'contract_value']\n list_filter = ['company', 'contract_type', 'currency_type',\n 'date_start', 'date_end', 'contract_value']\n search_fields = ['company', 'contract_type', 'currency_type',\n 'date_start', 'date_end', 'contract_value']\n\n\nadmin.site.register(Contract, ContractAdmin)\n\n\nclass CompanyAdmin(admin.ModelAdmin):\n list_display = ['id', 'name']\n list_display_links = ['id']\n list_editable = ['name']\n list_filter = ['name']\n search_fields = ['name']\n\n\n class Meta:\n verbose_name_plural = 'companies'\n\n\nadmin.site.register(Company, CompanyAdmin)\n\n\nclass Contract_typeAdmin(admin.ModelAdmin):\n list_display = ['id', 'contract_type']\n list_display_links = ['id']\n list_editable = ['contract_type']\n list_filter = ['contract_type']\n search_fields = ['contract_type']\n\n\nadmin.site.register(Contract_type, Contract_typeAdmin)\n\n\nclass CurrencyAdmin(admin.ModelAdmin):\n list_display = ['id', 'currency_type']\n list_display_links = ['id']\n list_editable = ['currency_type']\n list_filter = ['currency_type']\n search_fields = ['currency_type']\n\n\n class Meta:\n verbose_name_plural = 'companies'\n\n\nadmin.site.register(Currency, CurrencyAdmin)\n",
"<import token>\n\n\nclass ContractAdmin(admin.ModelAdmin):\n list_display = ['id', 'company', 'contract_type', 'currency_type',\n 'date_start', 'date_end', 'contract_value']\n list_display_links = ['id']\n list_editable = ['company', 'contract_type', 'currency_type',\n 'date_start', 'date_end', 'contract_value']\n list_filter = ['company', 'contract_type', 'currency_type',\n 'date_start', 'date_end', 'contract_value']\n search_fields = ['company', 'contract_type', 'currency_type',\n 'date_start', 'date_end', 'contract_value']\n\n\nadmin.site.register(Contract, ContractAdmin)\n\n\nclass CompanyAdmin(admin.ModelAdmin):\n list_display = ['id', 'name']\n list_display_links = ['id']\n list_editable = ['name']\n list_filter = ['name']\n search_fields = ['name']\n\n\n class Meta:\n verbose_name_plural = 'companies'\n\n\nadmin.site.register(Company, CompanyAdmin)\n\n\nclass Contract_typeAdmin(admin.ModelAdmin):\n list_display = ['id', 'contract_type']\n list_display_links = ['id']\n list_editable = ['contract_type']\n list_filter = ['contract_type']\n search_fields = ['contract_type']\n\n\nadmin.site.register(Contract_type, Contract_typeAdmin)\n\n\nclass CurrencyAdmin(admin.ModelAdmin):\n list_display = ['id', 'currency_type']\n list_display_links = ['id']\n list_editable = ['currency_type']\n list_filter = ['currency_type']\n search_fields = ['currency_type']\n\n\n class Meta:\n verbose_name_plural = 'companies'\n\n\nadmin.site.register(Currency, CurrencyAdmin)\n",
"<import token>\n\n\nclass ContractAdmin(admin.ModelAdmin):\n list_display = ['id', 'company', 'contract_type', 'currency_type',\n 'date_start', 'date_end', 'contract_value']\n list_display_links = ['id']\n list_editable = ['company', 'contract_type', 'currency_type',\n 'date_start', 'date_end', 'contract_value']\n list_filter = ['company', 'contract_type', 'currency_type',\n 'date_start', 'date_end', 'contract_value']\n search_fields = ['company', 'contract_type', 'currency_type',\n 'date_start', 'date_end', 'contract_value']\n\n\n<code token>\n\n\nclass CompanyAdmin(admin.ModelAdmin):\n list_display = ['id', 'name']\n list_display_links = ['id']\n list_editable = ['name']\n list_filter = ['name']\n search_fields = ['name']\n\n\n class Meta:\n verbose_name_plural = 'companies'\n\n\n<code token>\n\n\nclass Contract_typeAdmin(admin.ModelAdmin):\n list_display = ['id', 'contract_type']\n list_display_links = ['id']\n list_editable = ['contract_type']\n list_filter = ['contract_type']\n search_fields = ['contract_type']\n\n\n<code token>\n\n\nclass CurrencyAdmin(admin.ModelAdmin):\n list_display = ['id', 'currency_type']\n list_display_links = ['id']\n list_editable = ['currency_type']\n list_filter = ['currency_type']\n search_fields = ['currency_type']\n\n\n class Meta:\n verbose_name_plural = 'companies'\n\n\n<code token>\n",
"<import token>\n\n\nclass ContractAdmin(admin.ModelAdmin):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\n<code token>\n\n\nclass CompanyAdmin(admin.ModelAdmin):\n list_display = ['id', 'name']\n list_display_links = ['id']\n list_editable = ['name']\n list_filter = ['name']\n search_fields = ['name']\n\n\n class Meta:\n verbose_name_plural = 'companies'\n\n\n<code token>\n\n\nclass Contract_typeAdmin(admin.ModelAdmin):\n list_display = ['id', 'contract_type']\n list_display_links = ['id']\n list_editable = ['contract_type']\n list_filter = ['contract_type']\n search_fields = ['contract_type']\n\n\n<code token>\n\n\nclass CurrencyAdmin(admin.ModelAdmin):\n list_display = ['id', 'currency_type']\n list_display_links = ['id']\n list_editable = ['currency_type']\n list_filter = ['currency_type']\n search_fields = ['currency_type']\n\n\n class Meta:\n verbose_name_plural = 'companies'\n\n\n<code token>\n",
"<import token>\n<class token>\n<code token>\n\n\nclass CompanyAdmin(admin.ModelAdmin):\n list_display = ['id', 'name']\n list_display_links = ['id']\n list_editable = ['name']\n list_filter = ['name']\n search_fields = ['name']\n\n\n class Meta:\n verbose_name_plural = 'companies'\n\n\n<code token>\n\n\nclass Contract_typeAdmin(admin.ModelAdmin):\n list_display = ['id', 'contract_type']\n list_display_links = ['id']\n list_editable = ['contract_type']\n list_filter = ['contract_type']\n search_fields = ['contract_type']\n\n\n<code token>\n\n\nclass CurrencyAdmin(admin.ModelAdmin):\n list_display = ['id', 'currency_type']\n list_display_links = ['id']\n list_editable = ['currency_type']\n list_filter = ['currency_type']\n search_fields = ['currency_type']\n\n\n class Meta:\n verbose_name_plural = 'companies'\n\n\n<code token>\n",
"<import token>\n<class token>\n<code token>\n\n\nclass CompanyAdmin(admin.ModelAdmin):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\n class Meta:\n verbose_name_plural = 'companies'\n\n\n<code token>\n\n\nclass Contract_typeAdmin(admin.ModelAdmin):\n list_display = ['id', 'contract_type']\n list_display_links = ['id']\n list_editable = ['contract_type']\n list_filter = ['contract_type']\n search_fields = ['contract_type']\n\n\n<code token>\n\n\nclass CurrencyAdmin(admin.ModelAdmin):\n list_display = ['id', 'currency_type']\n list_display_links = ['id']\n list_editable = ['currency_type']\n list_filter = ['currency_type']\n search_fields = ['currency_type']\n\n\n class Meta:\n verbose_name_plural = 'companies'\n\n\n<code token>\n",
"<import token>\n<class token>\n<code token>\n<class token>\n<code token>\n\n\nclass Contract_typeAdmin(admin.ModelAdmin):\n list_display = ['id', 'contract_type']\n list_display_links = ['id']\n list_editable = ['contract_type']\n list_filter = ['contract_type']\n search_fields = ['contract_type']\n\n\n<code token>\n\n\nclass CurrencyAdmin(admin.ModelAdmin):\n list_display = ['id', 'currency_type']\n list_display_links = ['id']\n list_editable = ['currency_type']\n list_filter = ['currency_type']\n search_fields = ['currency_type']\n\n\n class Meta:\n verbose_name_plural = 'companies'\n\n\n<code token>\n",
"<import token>\n<class token>\n<code token>\n<class token>\n<code token>\n\n\nclass Contract_typeAdmin(admin.ModelAdmin):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\n<code token>\n\n\nclass CurrencyAdmin(admin.ModelAdmin):\n list_display = ['id', 'currency_type']\n list_display_links = ['id']\n list_editable = ['currency_type']\n list_filter = ['currency_type']\n search_fields = ['currency_type']\n\n\n class Meta:\n verbose_name_plural = 'companies'\n\n\n<code token>\n",
"<import token>\n<class token>\n<code token>\n<class token>\n<code token>\n<class token>\n<code token>\n\n\nclass CurrencyAdmin(admin.ModelAdmin):\n list_display = ['id', 'currency_type']\n list_display_links = ['id']\n list_editable = ['currency_type']\n list_filter = ['currency_type']\n search_fields = ['currency_type']\n\n\n class Meta:\n verbose_name_plural = 'companies'\n\n\n<code token>\n",
"<import token>\n<class token>\n<code token>\n<class token>\n<code token>\n<class token>\n<code token>\n\n\nclass CurrencyAdmin(admin.ModelAdmin):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\n class Meta:\n verbose_name_plural = 'companies'\n\n\n<code token>\n",
"<import token>\n<class token>\n<code token>\n<class token>\n<code token>\n<class token>\n<code token>\n<class token>\n<code token>\n"
] | false |
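The Django admin row above registers each ModelAdmin with `admin.site.register(...)`. Django also offers the equivalent `@admin.register` decorator; a minimal sketch, assuming the same `Contract` model and a trimmed field list:

from django.contrib import admin
from .models import Contract

@admin.register(Contract)  # equivalent to admin.site.register(Contract, ContractAdmin)
class ContractAdmin(admin.ModelAdmin):
    list_display = ["id", "company", "contract_type", "contract_value"]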
98,605 |
c0ece1fd1eb19927510801231b5ed2465aa88fa3
|
# pytest-osxnotify
# Mac OS X notification center support for py.test
# Requirements: pyobjc-core
import time
# Lazy-import pyobjc to work around a conflict with pytest-xdist
# looponfail on Python 3.3
objc = None
def pytest_addoption(parser):
"""
Adds options to control notifications.
"""
group = parser.getgroup('terminal reporting')
group.addoption(
'--osxnotify',
dest='osxnotify',
default=True,
help='Enable Mac OS X notification center notifications.'
)
def pytest_sessionstart(session):
if session.config.option.osxnotify:
notify('py.test', 'Running tests...')
def pytest_terminal_summary(terminalreporter):
if not terminalreporter.config.option.osxnotify:
return
tr = terminalreporter
passes = len(tr.stats.get('passed', []))
fails = len(tr.stats.get('failed', []))
skips = len(tr.stats.get('deselected', []))
errors = len(tr.stats.get('error', []))
if errors + passes + fails + skips == 0:
msg = 'No tests ran'
elif passes and not (fails or errors):
msg = 'Success - %i Passed' % passes
elif not (skips or errors):
msg = '%s Passed %s Failed' % (passes, fails)
else:
msg = '%s Passed %s Failed %s Errors %s Skipped' % (
passes, fails, errors, skips
)
notify('py.test', msg)
# Delay a bit to ensure that all notifications get displayed
# even if py.test finishes very quickly.
# It's unfortunate that this is a magic value for now.
time.sleep(0.3)
def swizzle(cls, SEL, func):
old_IMP = getattr(cls, SEL, None)
if old_IMP is None:
# This will work on OS X <= 10.9
old_IMP = cls.instanceMethodForSelector_(SEL)
def wrapper(self, *args, **kwargs):
return func(self, old_IMP, *args, **kwargs)
new_IMP = objc.selector(
wrapper,
selector=old_IMP.selector,
signature=old_IMP.signature
)
objc.classAddMethod(cls, SEL.encode(), new_IMP)
def notify(title, subtitle=None):
"""
Display a NSUserNotification on Mac OS X >= 10.8
"""
global objc
if not objc:
objc = __import__('objc')
swizzle(
objc.lookUpClass('NSBundle'),
'bundleIdentifier',
swizzled_bundleIdentifier
)
NSUserNotification = objc.lookUpClass('NSUserNotification')
NSUserNotificationCenter = objc.lookUpClass('NSUserNotificationCenter')
if not NSUserNotification or not NSUserNotificationCenter:
        print('NSUserNotification is not supported by your version of Mac OS X')
return
notification = NSUserNotification.alloc().init()
notification.setTitle_(str(title))
if subtitle:
notification.setSubtitle_(str(subtitle))
notification_center = NSUserNotificationCenter.defaultUserNotificationCenter()
notification_center.deliverNotification_(notification)
def swizzled_bundleIdentifier(self, original):
"""
Swizzle [NSBundle bundleIdentifier] to make NSUserNotifications work.
To post NSUserNotifications OS X requires the binary to be packaged
as an application bundle. To circumvent this restriction, we modify
`bundleIdentifier` to return a fake bundle identifier.
Original idea for this approach by Norio Numura:
https://github.com/norio-nomura/usernotification
"""
return 'com.apple.terminal'
|
[
"# pytest-osxnotify\n# Mac OS X notification center support for py.test\n# Requirements: pyobjc-core\nimport time\n\n# Lazy-import pyobjc to work around a conflict with pytest-xdist\n# looponfail on Python 3.3\nobjc = None\n\n\ndef pytest_addoption(parser):\n \"\"\"\n Adds options to control notifications.\n\n \"\"\"\n group = parser.getgroup('terminal reporting')\n group.addoption(\n '--osxnotify',\n dest='osxnotify',\n default=True,\n help='Enable Mac OS X notification center notifications.'\n )\n\n\ndef pytest_sessionstart(session):\n if session.config.option.osxnotify:\n notify('py.test', 'Running tests...')\n\n\ndef pytest_terminal_summary(terminalreporter):\n if not terminalreporter.config.option.osxnotify:\n return\n tr = terminalreporter\n passes = len(tr.stats.get('passed', []))\n fails = len(tr.stats.get('failed', []))\n skips = len(tr.stats.get('deselected', []))\n errors = len(tr.stats.get('error', []))\n if errors + passes + fails + skips == 0:\n msg = 'No tests ran'\n elif passes and not (fails or errors):\n msg = 'Success - %i Passed' % passes\n elif not (skips or errors):\n msg = '%s Passed %s Failed' % (passes, fails)\n else:\n msg = '%s Passed %s Failed %s Errors %s Skipped' % (\n passes, fails, errors, skips\n )\n notify('py.test', msg)\n # Delay a bit to ensure that all notifications get displayed\n # even if py.test finishes very quickly.\n # It's unfortunate that this is a magic value for now.\n time.sleep(0.3)\n\n\ndef swizzle(cls, SEL, func):\n old_IMP = getattr(cls, SEL, None)\n if old_IMP is None:\n # This will work on OS X <= 10.9\n old_IMP = cls.instanceMethodForSelector_(SEL)\n\n def wrapper(self, *args, **kwargs):\n return func(self, old_IMP, *args, **kwargs)\n\n new_IMP = objc.selector(\n wrapper,\n selector=old_IMP.selector,\n signature=old_IMP.signature\n )\n objc.classAddMethod(cls, SEL.encode(), new_IMP)\n\n\ndef notify(title, subtitle=None):\n \"\"\"\n Display a NSUserNotification on Mac OS X >= 10.8\n\n \"\"\"\n global objc\n if not objc:\n objc = __import__('objc')\n swizzle(\n objc.lookUpClass('NSBundle'),\n 'bundleIdentifier',\n swizzled_bundleIdentifier\n )\n\n NSUserNotification = objc.lookUpClass('NSUserNotification')\n NSUserNotificationCenter = objc.lookUpClass('NSUserNotificationCenter')\n if not NSUserNotification or not NSUserNotificationCenter:\n print('NSUserNotifcation is not supported by your version of Mac OS X')\n return\n\n notification = NSUserNotification.alloc().init()\n notification.setTitle_(str(title))\n if subtitle:\n notification.setSubtitle_(str(subtitle))\n\n notification_center = NSUserNotificationCenter.defaultUserNotificationCenter()\n notification_center.deliverNotification_(notification)\n\n\ndef swizzled_bundleIdentifier(self, original):\n \"\"\"\n Swizzle [NSBundle bundleIdentifier] to make NSUserNotifications work.\n\n To post NSUserNotifications OS X requires the binary to be packaged\n as an application bundle. To circumvent this restriction, we modify\n `bundleIdentifier` to return a fake bundle identifier.\n\n Original idea for this approach by Norio Numura:\n https://github.com/norio-nomura/usernotification\n\n \"\"\"\n return 'com.apple.terminal'\n",
"import time\nobjc = None\n\n\ndef pytest_addoption(parser):\n \"\"\"\n Adds options to control notifications.\n\n \"\"\"\n group = parser.getgroup('terminal reporting')\n group.addoption('--osxnotify', dest='osxnotify', default=True, help=\n 'Enable Mac OS X notification center notifications.')\n\n\ndef pytest_sessionstart(session):\n if session.config.option.osxnotify:\n notify('py.test', 'Running tests...')\n\n\ndef pytest_terminal_summary(terminalreporter):\n if not terminalreporter.config.option.osxnotify:\n return\n tr = terminalreporter\n passes = len(tr.stats.get('passed', []))\n fails = len(tr.stats.get('failed', []))\n skips = len(tr.stats.get('deselected', []))\n errors = len(tr.stats.get('error', []))\n if errors + passes + fails + skips == 0:\n msg = 'No tests ran'\n elif passes and not (fails or errors):\n msg = 'Success - %i Passed' % passes\n elif not (skips or errors):\n msg = '%s Passed %s Failed' % (passes, fails)\n else:\n msg = '%s Passed %s Failed %s Errors %s Skipped' % (passes, fails,\n errors, skips)\n notify('py.test', msg)\n time.sleep(0.3)\n\n\ndef swizzle(cls, SEL, func):\n old_IMP = getattr(cls, SEL, None)\n if old_IMP is None:\n old_IMP = cls.instanceMethodForSelector_(SEL)\n\n def wrapper(self, *args, **kwargs):\n return func(self, old_IMP, *args, **kwargs)\n new_IMP = objc.selector(wrapper, selector=old_IMP.selector, signature=\n old_IMP.signature)\n objc.classAddMethod(cls, SEL.encode(), new_IMP)\n\n\ndef notify(title, subtitle=None):\n \"\"\"\n Display a NSUserNotification on Mac OS X >= 10.8\n\n \"\"\"\n global objc\n if not objc:\n objc = __import__('objc')\n swizzle(objc.lookUpClass('NSBundle'), 'bundleIdentifier',\n swizzled_bundleIdentifier)\n NSUserNotification = objc.lookUpClass('NSUserNotification')\n NSUserNotificationCenter = objc.lookUpClass('NSUserNotificationCenter')\n if not NSUserNotification or not NSUserNotificationCenter:\n print('NSUserNotifcation is not supported by your version of Mac OS X')\n return\n notification = NSUserNotification.alloc().init()\n notification.setTitle_(str(title))\n if subtitle:\n notification.setSubtitle_(str(subtitle))\n notification_center = (NSUserNotificationCenter.\n defaultUserNotificationCenter())\n notification_center.deliverNotification_(notification)\n\n\ndef swizzled_bundleIdentifier(self, original):\n \"\"\"\n Swizzle [NSBundle bundleIdentifier] to make NSUserNotifications work.\n\n To post NSUserNotifications OS X requires the binary to be packaged\n as an application bundle. To circumvent this restriction, we modify\n `bundleIdentifier` to return a fake bundle identifier.\n\n Original idea for this approach by Norio Numura:\n https://github.com/norio-nomura/usernotification\n\n \"\"\"\n return 'com.apple.terminal'\n",
"<import token>\nobjc = None\n\n\ndef pytest_addoption(parser):\n \"\"\"\n Adds options to control notifications.\n\n \"\"\"\n group = parser.getgroup('terminal reporting')\n group.addoption('--osxnotify', dest='osxnotify', default=True, help=\n 'Enable Mac OS X notification center notifications.')\n\n\ndef pytest_sessionstart(session):\n if session.config.option.osxnotify:\n notify('py.test', 'Running tests...')\n\n\ndef pytest_terminal_summary(terminalreporter):\n if not terminalreporter.config.option.osxnotify:\n return\n tr = terminalreporter\n passes = len(tr.stats.get('passed', []))\n fails = len(tr.stats.get('failed', []))\n skips = len(tr.stats.get('deselected', []))\n errors = len(tr.stats.get('error', []))\n if errors + passes + fails + skips == 0:\n msg = 'No tests ran'\n elif passes and not (fails or errors):\n msg = 'Success - %i Passed' % passes\n elif not (skips or errors):\n msg = '%s Passed %s Failed' % (passes, fails)\n else:\n msg = '%s Passed %s Failed %s Errors %s Skipped' % (passes, fails,\n errors, skips)\n notify('py.test', msg)\n time.sleep(0.3)\n\n\ndef swizzle(cls, SEL, func):\n old_IMP = getattr(cls, SEL, None)\n if old_IMP is None:\n old_IMP = cls.instanceMethodForSelector_(SEL)\n\n def wrapper(self, *args, **kwargs):\n return func(self, old_IMP, *args, **kwargs)\n new_IMP = objc.selector(wrapper, selector=old_IMP.selector, signature=\n old_IMP.signature)\n objc.classAddMethod(cls, SEL.encode(), new_IMP)\n\n\ndef notify(title, subtitle=None):\n \"\"\"\n Display a NSUserNotification on Mac OS X >= 10.8\n\n \"\"\"\n global objc\n if not objc:\n objc = __import__('objc')\n swizzle(objc.lookUpClass('NSBundle'), 'bundleIdentifier',\n swizzled_bundleIdentifier)\n NSUserNotification = objc.lookUpClass('NSUserNotification')\n NSUserNotificationCenter = objc.lookUpClass('NSUserNotificationCenter')\n if not NSUserNotification or not NSUserNotificationCenter:\n print('NSUserNotifcation is not supported by your version of Mac OS X')\n return\n notification = NSUserNotification.alloc().init()\n notification.setTitle_(str(title))\n if subtitle:\n notification.setSubtitle_(str(subtitle))\n notification_center = (NSUserNotificationCenter.\n defaultUserNotificationCenter())\n notification_center.deliverNotification_(notification)\n\n\ndef swizzled_bundleIdentifier(self, original):\n \"\"\"\n Swizzle [NSBundle bundleIdentifier] to make NSUserNotifications work.\n\n To post NSUserNotifications OS X requires the binary to be packaged\n as an application bundle. To circumvent this restriction, we modify\n `bundleIdentifier` to return a fake bundle identifier.\n\n Original idea for this approach by Norio Numura:\n https://github.com/norio-nomura/usernotification\n\n \"\"\"\n return 'com.apple.terminal'\n",
"<import token>\n<assignment token>\n\n\ndef pytest_addoption(parser):\n \"\"\"\n Adds options to control notifications.\n\n \"\"\"\n group = parser.getgroup('terminal reporting')\n group.addoption('--osxnotify', dest='osxnotify', default=True, help=\n 'Enable Mac OS X notification center notifications.')\n\n\ndef pytest_sessionstart(session):\n if session.config.option.osxnotify:\n notify('py.test', 'Running tests...')\n\n\ndef pytest_terminal_summary(terminalreporter):\n if not terminalreporter.config.option.osxnotify:\n return\n tr = terminalreporter\n passes = len(tr.stats.get('passed', []))\n fails = len(tr.stats.get('failed', []))\n skips = len(tr.stats.get('deselected', []))\n errors = len(tr.stats.get('error', []))\n if errors + passes + fails + skips == 0:\n msg = 'No tests ran'\n elif passes and not (fails or errors):\n msg = 'Success - %i Passed' % passes\n elif not (skips or errors):\n msg = '%s Passed %s Failed' % (passes, fails)\n else:\n msg = '%s Passed %s Failed %s Errors %s Skipped' % (passes, fails,\n errors, skips)\n notify('py.test', msg)\n time.sleep(0.3)\n\n\ndef swizzle(cls, SEL, func):\n old_IMP = getattr(cls, SEL, None)\n if old_IMP is None:\n old_IMP = cls.instanceMethodForSelector_(SEL)\n\n def wrapper(self, *args, **kwargs):\n return func(self, old_IMP, *args, **kwargs)\n new_IMP = objc.selector(wrapper, selector=old_IMP.selector, signature=\n old_IMP.signature)\n objc.classAddMethod(cls, SEL.encode(), new_IMP)\n\n\ndef notify(title, subtitle=None):\n \"\"\"\n Display a NSUserNotification on Mac OS X >= 10.8\n\n \"\"\"\n global objc\n if not objc:\n objc = __import__('objc')\n swizzle(objc.lookUpClass('NSBundle'), 'bundleIdentifier',\n swizzled_bundleIdentifier)\n NSUserNotification = objc.lookUpClass('NSUserNotification')\n NSUserNotificationCenter = objc.lookUpClass('NSUserNotificationCenter')\n if not NSUserNotification or not NSUserNotificationCenter:\n print('NSUserNotifcation is not supported by your version of Mac OS X')\n return\n notification = NSUserNotification.alloc().init()\n notification.setTitle_(str(title))\n if subtitle:\n notification.setSubtitle_(str(subtitle))\n notification_center = (NSUserNotificationCenter.\n defaultUserNotificationCenter())\n notification_center.deliverNotification_(notification)\n\n\ndef swizzled_bundleIdentifier(self, original):\n \"\"\"\n Swizzle [NSBundle bundleIdentifier] to make NSUserNotifications work.\n\n To post NSUserNotifications OS X requires the binary to be packaged\n as an application bundle. To circumvent this restriction, we modify\n `bundleIdentifier` to return a fake bundle identifier.\n\n Original idea for this approach by Norio Numura:\n https://github.com/norio-nomura/usernotification\n\n \"\"\"\n return 'com.apple.terminal'\n",
"<import token>\n<assignment token>\n\n\ndef pytest_addoption(parser):\n \"\"\"\n Adds options to control notifications.\n\n \"\"\"\n group = parser.getgroup('terminal reporting')\n group.addoption('--osxnotify', dest='osxnotify', default=True, help=\n 'Enable Mac OS X notification center notifications.')\n\n\ndef pytest_sessionstart(session):\n if session.config.option.osxnotify:\n notify('py.test', 'Running tests...')\n\n\n<function token>\n\n\ndef swizzle(cls, SEL, func):\n old_IMP = getattr(cls, SEL, None)\n if old_IMP is None:\n old_IMP = cls.instanceMethodForSelector_(SEL)\n\n def wrapper(self, *args, **kwargs):\n return func(self, old_IMP, *args, **kwargs)\n new_IMP = objc.selector(wrapper, selector=old_IMP.selector, signature=\n old_IMP.signature)\n objc.classAddMethod(cls, SEL.encode(), new_IMP)\n\n\ndef notify(title, subtitle=None):\n \"\"\"\n Display a NSUserNotification on Mac OS X >= 10.8\n\n \"\"\"\n global objc\n if not objc:\n objc = __import__('objc')\n swizzle(objc.lookUpClass('NSBundle'), 'bundleIdentifier',\n swizzled_bundleIdentifier)\n NSUserNotification = objc.lookUpClass('NSUserNotification')\n NSUserNotificationCenter = objc.lookUpClass('NSUserNotificationCenter')\n if not NSUserNotification or not NSUserNotificationCenter:\n print('NSUserNotifcation is not supported by your version of Mac OS X')\n return\n notification = NSUserNotification.alloc().init()\n notification.setTitle_(str(title))\n if subtitle:\n notification.setSubtitle_(str(subtitle))\n notification_center = (NSUserNotificationCenter.\n defaultUserNotificationCenter())\n notification_center.deliverNotification_(notification)\n\n\ndef swizzled_bundleIdentifier(self, original):\n \"\"\"\n Swizzle [NSBundle bundleIdentifier] to make NSUserNotifications work.\n\n To post NSUserNotifications OS X requires the binary to be packaged\n as an application bundle. To circumvent this restriction, we modify\n `bundleIdentifier` to return a fake bundle identifier.\n\n Original idea for this approach by Norio Numura:\n https://github.com/norio-nomura/usernotification\n\n \"\"\"\n return 'com.apple.terminal'\n",
"<import token>\n<assignment token>\n\n\ndef pytest_addoption(parser):\n \"\"\"\n Adds options to control notifications.\n\n \"\"\"\n group = parser.getgroup('terminal reporting')\n group.addoption('--osxnotify', dest='osxnotify', default=True, help=\n 'Enable Mac OS X notification center notifications.')\n\n\n<function token>\n<function token>\n\n\ndef swizzle(cls, SEL, func):\n old_IMP = getattr(cls, SEL, None)\n if old_IMP is None:\n old_IMP = cls.instanceMethodForSelector_(SEL)\n\n def wrapper(self, *args, **kwargs):\n return func(self, old_IMP, *args, **kwargs)\n new_IMP = objc.selector(wrapper, selector=old_IMP.selector, signature=\n old_IMP.signature)\n objc.classAddMethod(cls, SEL.encode(), new_IMP)\n\n\ndef notify(title, subtitle=None):\n \"\"\"\n Display a NSUserNotification on Mac OS X >= 10.8\n\n \"\"\"\n global objc\n if not objc:\n objc = __import__('objc')\n swizzle(objc.lookUpClass('NSBundle'), 'bundleIdentifier',\n swizzled_bundleIdentifier)\n NSUserNotification = objc.lookUpClass('NSUserNotification')\n NSUserNotificationCenter = objc.lookUpClass('NSUserNotificationCenter')\n if not NSUserNotification or not NSUserNotificationCenter:\n print('NSUserNotifcation is not supported by your version of Mac OS X')\n return\n notification = NSUserNotification.alloc().init()\n notification.setTitle_(str(title))\n if subtitle:\n notification.setSubtitle_(str(subtitle))\n notification_center = (NSUserNotificationCenter.\n defaultUserNotificationCenter())\n notification_center.deliverNotification_(notification)\n\n\ndef swizzled_bundleIdentifier(self, original):\n \"\"\"\n Swizzle [NSBundle bundleIdentifier] to make NSUserNotifications work.\n\n To post NSUserNotifications OS X requires the binary to be packaged\n as an application bundle. To circumvent this restriction, we modify\n `bundleIdentifier` to return a fake bundle identifier.\n\n Original idea for this approach by Norio Numura:\n https://github.com/norio-nomura/usernotification\n\n \"\"\"\n return 'com.apple.terminal'\n",
"<import token>\n<assignment token>\n\n\ndef pytest_addoption(parser):\n \"\"\"\n Adds options to control notifications.\n\n \"\"\"\n group = parser.getgroup('terminal reporting')\n group.addoption('--osxnotify', dest='osxnotify', default=True, help=\n 'Enable Mac OS X notification center notifications.')\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef notify(title, subtitle=None):\n \"\"\"\n Display a NSUserNotification on Mac OS X >= 10.8\n\n \"\"\"\n global objc\n if not objc:\n objc = __import__('objc')\n swizzle(objc.lookUpClass('NSBundle'), 'bundleIdentifier',\n swizzled_bundleIdentifier)\n NSUserNotification = objc.lookUpClass('NSUserNotification')\n NSUserNotificationCenter = objc.lookUpClass('NSUserNotificationCenter')\n if not NSUserNotification or not NSUserNotificationCenter:\n print('NSUserNotifcation is not supported by your version of Mac OS X')\n return\n notification = NSUserNotification.alloc().init()\n notification.setTitle_(str(title))\n if subtitle:\n notification.setSubtitle_(str(subtitle))\n notification_center = (NSUserNotificationCenter.\n defaultUserNotificationCenter())\n notification_center.deliverNotification_(notification)\n\n\ndef swizzled_bundleIdentifier(self, original):\n \"\"\"\n Swizzle [NSBundle bundleIdentifier] to make NSUserNotifications work.\n\n To post NSUserNotifications OS X requires the binary to be packaged\n as an application bundle. To circumvent this restriction, we modify\n `bundleIdentifier` to return a fake bundle identifier.\n\n Original idea for this approach by Norio Numura:\n https://github.com/norio-nomura/usernotification\n\n \"\"\"\n return 'com.apple.terminal'\n",
"<import token>\n<assignment token>\n\n\ndef pytest_addoption(parser):\n \"\"\"\n Adds options to control notifications.\n\n \"\"\"\n group = parser.getgroup('terminal reporting')\n group.addoption('--osxnotify', dest='osxnotify', default=True, help=\n 'Enable Mac OS X notification center notifications.')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef swizzled_bundleIdentifier(self, original):\n \"\"\"\n Swizzle [NSBundle bundleIdentifier] to make NSUserNotifications work.\n\n To post NSUserNotifications OS X requires the binary to be packaged\n as an application bundle. To circumvent this restriction, we modify\n `bundleIdentifier` to return a fake bundle identifier.\n\n Original idea for this approach by Norio Numura:\n https://github.com/norio-nomura/usernotification\n\n \"\"\"\n return 'com.apple.terminal'\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef swizzled_bundleIdentifier(self, original):\n \"\"\"\n Swizzle [NSBundle bundleIdentifier] to make NSUserNotifications work.\n\n To post NSUserNotifications OS X requires the binary to be packaged\n as an application bundle. To circumvent this restriction, we modify\n `bundleIdentifier` to return a fake bundle identifier.\n\n Original idea for this approach by Norio Numura:\n https://github.com/norio-nomura/usernotification\n\n \"\"\"\n return 'com.apple.terminal'\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
98,606 |
9771eed454fdd8eeb87d89af890b636699a15b17
|
import cv2
import numpy as np
from functools import wraps
from dataclasses import dataclass, field
def split_left_right(array, frame_width, frame_height):
''' helper func to distinguish lines marking left and right lanes '''
left_lines, right_lines = [], []
for _, item in enumerate(array):
if (0 <= item[0, 0] <= 0.2 * frame_width) and \
(0 <= item[0, 2] <= 0.6 * frame_width):
left_lines.append(item)
elif (0.4 * frame_width <= item[0, 0] <= frame_width) and \
(0.6 * frame_width <= item[0, 2] <= frame_width):
right_lines.append(item)
return np.array(left_lines), np.array(right_lines)
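# A small illustration of the split rule above (assumed values, not taken from
# a real frame): with frame_width = 352, a segment whose x-endpoints are
# (30, 150) satisfies 30 <= 70.4 and 150 <= 211.2 and is collected as a left
# lane line, while (200, 300) satisfies 200 >= 140.8 and 300 >= 211.2 and is
# collected as a right lane line; segments matching neither rule are dropped.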
def get_laneangle(lane):
    ''' helper func to calc the lane angle in degrees '''
x1, y1, x2, y2 = lane[0]
m = (y2 - y1) / (x2 - x1)
angle = np.arctan(m)
return np.degrees(angle)
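# Worked example for get_laneangle (a hypothetical segment, not from the video
# feed): a lane stored as [[0, 288, 176, 144]] gives m = (144 - 288) / 176,
# about -0.818, so np.degrees(np.arctan(m)) is about -39.3 degrees. The angle
# is measured in image coordinates (y grows downwards), and a perfectly
# vertical segment (x2 == x1) would trigger a division by zero here.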
def lane_detection(roi_shape="square"):
''' decorator to detect lanes in video frame '''
def inner_decorator(func):
@wraps(func)
def func_wrapper(*args, **kwargs):
frame = func(*args, **kwargs)
            left_lane, right_lane = np.array([]), np.array([])
            left_lines, right_lines = np.array([]), np.array([])
            # build a stencil that blanks out everything outside the region of
            # interest; the 288x352 mask assumes the capture delivers 352x288 frames
            white = np.ones((288, 352, 1), dtype=np.uint8) * 255
if roi_shape == "square":
roi = np.array([[0, frame.shape[0]//2], [frame.shape[1],
frame.shape[0]//2], [frame.shape[1],
frame.shape[0]], [0, frame.shape[0]]])
else:
roi = np.array([[0, 288], [0, 230], [88, 130], [264, 130],
[352, 230], [352, 288]])
stencil = cv2.fillConvexPoly(white, roi, 0)
            # convert to grayscale and apply a binary threshold
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame_binary = cv2.threshold(frame_gray, 80, 255, cv2.THRESH_BINARY)[1]
roi_frame = cv2.add(frame_binary, stencil)
            # probabilistic Hough line transform on the inverted ROI mask
lines = cv2.HoughLinesP(cv2.bitwise_not(roi_frame), 1, theta=np.pi/180,
threshold=30, minLineLength=80, maxLineGap=50)
            # get lanes
            # guard needed because cv2.HoughLinesP(...) returns None if nothing is detected
            if isinstance(lines, np.ndarray):
left_lines, right_lines = split_left_right(lines, 352, 288)
frame_lines = np.copy(frame)
if left_lines.size != 0:
for line in left_lines:
x1, y1, x2, y2 = line[0]
cv2.line(frame_lines, (x1, y1), (x2, y2), (0, 255, 0), 3)
left_lane = np.mean(left_lines, axis=0, dtype=np.int32)
x1, y1, x2, y2 = left_lane[0]
cv2.line(frame_lines, (x1, y1), (x2, y2), (0, 0, 255), 6)
if right_lines.size != 0:
for line in right_lines:
x1, y1, x2, y2 = line[0]
cv2.line(frame_lines, (x1, y1), (x2, y2), (255, 0, 0), 3)
right_lane = np.mean(right_lines, axis=0, dtype=np.int32)
x1, y1, x2, y2 = right_lane[0]
cv2.line(frame_lines, (x1, y1), (x2, y2), (0, 0, 255), 6)
return frame, frame_lines, roi_frame, left_lane, right_lane
return func_wrapper
return inner_decorator
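# Note on the decorator contract: the wrapped function must return a single
# BGR frame; the wrapper then returns the tuple (frame, frame_lines,
# roi_frame, left_lane, right_lane), where left_lane / right_lane stay empty
# arrays if no lines were found on that side. The 288x352 stencil above
# assumes the capture is configured for 352x288 frames, as Video(0, 352, 288)
# does below.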
@dataclass
class Video:
'''
Dataclass to manage video input with
src = 0 -> webcam
'''
src: int
width: float
height: float
cap: object = field(init=False)
def __post_init__(self):
self.cap = cv2.VideoCapture(self.src)
        self.cap.set(3, self.width)   # 3 == cv2.CAP_PROP_FRAME_WIDTH
        self.cap.set(4, self.height)  # 4 == cv2.CAP_PROP_FRAME_HEIGHT
@lane_detection("square")
def get_frame(self):
        ret, frame = self.cap.read()
        if not ret:
            raise RuntimeError("Could not read a frame from the video source")
        return frame
if __name__ == "__main__":
video = Video(0, 352, 288)
while True:
frame, frame_lines, roi_frame, left_lane, right_lane = video.get_frame()
cv2.imshow("frame", frame)
cv2.imshow("frame w/ lines", frame_lines)
cv2.imshow("ROI frame", roi_frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
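    # Cleanup sketch added after the loop (not part of the original sample):
    # release the capture device and close the preview windows on exit.
    video.cap.release()
    cv2.destroyAllWindows()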
|
[
"import cv2\nimport numpy as np\n\nfrom functools import wraps\nfrom dataclasses import dataclass, field\n\n\ndef split_left_right(array, frame_width, frame_height):\n ''' helper func to distinguish lines marking left and right lanes '''\n left_lines, right_lines = [], []\n\n for _, item in enumerate(array):\n if (0 <= item[0, 0] <= 0.2 * frame_width) and \\\n (0 <= item[0, 2] <= 0.6 * frame_width):\n left_lines.append(item)\n\n elif (0.4 * frame_width <= item[0, 0] <= frame_width) and \\\n (0.6 * frame_width <= item[0, 2] <= frame_width):\n right_lines.append(item)\n\n return np.array(left_lines), np.array(right_lines)\n\n\ndef get_laneangle(lane):\n ''' helper func to calc langeangle in degrees '''\n x1, y1, x2, y2 = lane[0]\n m = (y2 - y1) / (x2 - x1)\n angle = np.arctan(m)\n\n return np.degrees(angle)\n\n\ndef lane_detection(roi_shape=\"square\"):\n ''' decorator to detect lanes in video frame '''\n def inner_decorator(func):\n @wraps(func)\n def func_wrapper(*args, **kwargs):\n frame = func(*args, **kwargs)\n left_lane, right_lane = np.array([]), np.array([])\n\n # TODO: detect ROI\n white = np.ones((288, 352, 1), dtype=np.uint8) * 255\n\n if roi_shape == \"square\":\n roi = np.array([[0, frame.shape[0]//2], [frame.shape[1],\n frame.shape[0]//2], [frame.shape[1],\n frame.shape[0]], [0, frame.shape[0]]])\n else:\n roi = np.array([[0, 288], [0, 230], [88, 130], [264, 130],\n [352, 230], [352, 288]])\n\n stencil = cv2.fillConvexPoly(white, roi, 0)\n\n # TODO: to grayscale and tresholding\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n frame_binary = cv2.threshold(frame_gray, 80, 255, cv2.THRESH_BINARY)[1]\n roi_frame = cv2.add(frame_binary, stencil)\n\n # TODO: Hough line transformation\n lines = cv2.HoughLinesP(cv2.bitwise_not(roi_frame), 1, theta=np.pi/180,\n threshold=30, minLineLength=80, maxLineGap=50)\n\n # get lanes\n # if-clause b/c cv2.HoughLineP(...) returns None if nothing is detected\n if str(type(lines)) == \"<class 'numpy.ndarray'>\":\n left_lines, right_lines = split_left_right(lines, 352, 288)\n frame_lines = np.copy(frame)\n\n if left_lines.size != 0:\n for line in left_lines:\n x1, y1, x2, y2 = line[0]\n cv2.line(frame_lines, (x1, y1), (x2, y2), (0, 255, 0), 3)\n\n left_lane = np.mean(left_lines, axis=0, dtype=np.int32)\n x1, y1, x2, y2 = left_lane[0]\n cv2.line(frame_lines, (x1, y1), (x2, y2), (0, 0, 255), 6)\n\n if right_lines.size != 0:\n for line in right_lines:\n x1, y1, x2, y2 = line[0]\n cv2.line(frame_lines, (x1, y1), (x2, y2), (255, 0, 0), 3)\n\n right_lane = np.mean(right_lines, axis=0, dtype=np.int32)\n x1, y1, x2, y2 = right_lane[0]\n cv2.line(frame_lines, (x1, y1), (x2, y2), (0, 0, 255), 6)\n\n return frame, frame_lines, roi_frame, left_lane, right_lane\n\n return func_wrapper\n\n return inner_decorator\n\n\n@dataclass\nclass Video:\n '''\n Dataclass to manage video input with\n src = 0 -> webcam\n '''\n src: int\n width: float\n height: float\n cap: object = field(init=False)\n\n def __post_init__(self):\n self.cap = cv2.VideoCapture(self.src)\n self.cap.set(3, self.width)\n self.cap.set(4, self.height)\n\n @lane_detection(\"square\")\n def get_frame(self):\n ret, frame = self.cap.read()\n if ret:\n return frame\n\n\nif __name__ == \"__main__\":\n video = Video(0, 352, 288)\n\n while True:\n frame, frame_lines, roi_frame, left_lane, right_lane = video.get_frame()\n cv2.imshow(\"frame\", frame)\n cv2.imshow(\"frame w/ lines\", frame_lines)\n cv2.imshow(\"ROI frame\", roi_frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n",
"import cv2\nimport numpy as np\nfrom functools import wraps\nfrom dataclasses import dataclass, field\n\n\ndef split_left_right(array, frame_width, frame_height):\n \"\"\" helper func to distinguish lines marking left and right lanes \"\"\"\n left_lines, right_lines = [], []\n for _, item in enumerate(array):\n if 0 <= item[0, 0] <= 0.2 * frame_width and 0 <= item[0, 2\n ] <= 0.6 * frame_width:\n left_lines.append(item)\n elif 0.4 * frame_width <= item[0, 0\n ] <= frame_width and 0.6 * frame_width <= item[0, 2\n ] <= frame_width:\n right_lines.append(item)\n return np.array(left_lines), np.array(right_lines)\n\n\ndef get_laneangle(lane):\n \"\"\" helper func to calc langeangle in degrees \"\"\"\n x1, y1, x2, y2 = lane[0]\n m = (y2 - y1) / (x2 - x1)\n angle = np.arctan(m)\n return np.degrees(angle)\n\n\ndef lane_detection(roi_shape='square'):\n \"\"\" decorator to detect lanes in video frame \"\"\"\n\n def inner_decorator(func):\n\n @wraps(func)\n def func_wrapper(*args, **kwargs):\n frame = func(*args, **kwargs)\n left_lane, right_lane = np.array([]), np.array([])\n white = np.ones((288, 352, 1), dtype=np.uint8) * 255\n if roi_shape == 'square':\n roi = np.array([[0, frame.shape[0] // 2], [frame.shape[1], \n frame.shape[0] // 2], [frame.shape[1], frame.shape[0]],\n [0, frame.shape[0]]])\n else:\n roi = np.array([[0, 288], [0, 230], [88, 130], [264, 130],\n [352, 230], [352, 288]])\n stencil = cv2.fillConvexPoly(white, roi, 0)\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n frame_binary = cv2.threshold(frame_gray, 80, 255, cv2.THRESH_BINARY\n )[1]\n roi_frame = cv2.add(frame_binary, stencil)\n lines = cv2.HoughLinesP(cv2.bitwise_not(roi_frame), 1, theta=np\n .pi / 180, threshold=30, minLineLength=80, maxLineGap=50)\n if str(type(lines)) == \"<class 'numpy.ndarray'>\":\n left_lines, right_lines = split_left_right(lines, 352, 288)\n frame_lines = np.copy(frame)\n if left_lines.size != 0:\n for line in left_lines:\n x1, y1, x2, y2 = line[0]\n cv2.line(frame_lines, (x1, y1), (x2, y2), (0, 255, \n 0), 3)\n left_lane = np.mean(left_lines, axis=0, dtype=np.int32)\n x1, y1, x2, y2 = left_lane[0]\n cv2.line(frame_lines, (x1, y1), (x2, y2), (0, 0, 255), 6)\n if right_lines.size != 0:\n for line in right_lines:\n x1, y1, x2, y2 = line[0]\n cv2.line(frame_lines, (x1, y1), (x2, y2), (255, 0, \n 0), 3)\n right_lane = np.mean(right_lines, axis=0, dtype=np.int32)\n x1, y1, x2, y2 = right_lane[0]\n cv2.line(frame_lines, (x1, y1), (x2, y2), (0, 0, 255), 6)\n return frame, frame_lines, roi_frame, left_lane, right_lane\n return func_wrapper\n return inner_decorator\n\n\n@dataclass\nclass Video:\n \"\"\"\n Dataclass to manage video input with\n src = 0 -> webcam\n \"\"\"\n src: int\n width: float\n height: float\n cap: object = field(init=False)\n\n def __post_init__(self):\n self.cap = cv2.VideoCapture(self.src)\n self.cap.set(3, self.width)\n self.cap.set(4, self.height)\n\n @lane_detection('square')\n def get_frame(self):\n ret, frame = self.cap.read()\n if ret:\n return frame\n\n\nif __name__ == '__main__':\n video = Video(0, 352, 288)\n while True:\n frame, frame_lines, roi_frame, left_lane, right_lane = video.get_frame(\n )\n cv2.imshow('frame', frame)\n cv2.imshow('frame w/ lines', frame_lines)\n cv2.imshow('ROI frame', roi_frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n",
"<import token>\n\n\ndef split_left_right(array, frame_width, frame_height):\n \"\"\" helper func to distinguish lines marking left and right lanes \"\"\"\n left_lines, right_lines = [], []\n for _, item in enumerate(array):\n if 0 <= item[0, 0] <= 0.2 * frame_width and 0 <= item[0, 2\n ] <= 0.6 * frame_width:\n left_lines.append(item)\n elif 0.4 * frame_width <= item[0, 0\n ] <= frame_width and 0.6 * frame_width <= item[0, 2\n ] <= frame_width:\n right_lines.append(item)\n return np.array(left_lines), np.array(right_lines)\n\n\ndef get_laneangle(lane):\n \"\"\" helper func to calc langeangle in degrees \"\"\"\n x1, y1, x2, y2 = lane[0]\n m = (y2 - y1) / (x2 - x1)\n angle = np.arctan(m)\n return np.degrees(angle)\n\n\ndef lane_detection(roi_shape='square'):\n \"\"\" decorator to detect lanes in video frame \"\"\"\n\n def inner_decorator(func):\n\n @wraps(func)\n def func_wrapper(*args, **kwargs):\n frame = func(*args, **kwargs)\n left_lane, right_lane = np.array([]), np.array([])\n white = np.ones((288, 352, 1), dtype=np.uint8) * 255\n if roi_shape == 'square':\n roi = np.array([[0, frame.shape[0] // 2], [frame.shape[1], \n frame.shape[0] // 2], [frame.shape[1], frame.shape[0]],\n [0, frame.shape[0]]])\n else:\n roi = np.array([[0, 288], [0, 230], [88, 130], [264, 130],\n [352, 230], [352, 288]])\n stencil = cv2.fillConvexPoly(white, roi, 0)\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n frame_binary = cv2.threshold(frame_gray, 80, 255, cv2.THRESH_BINARY\n )[1]\n roi_frame = cv2.add(frame_binary, stencil)\n lines = cv2.HoughLinesP(cv2.bitwise_not(roi_frame), 1, theta=np\n .pi / 180, threshold=30, minLineLength=80, maxLineGap=50)\n if str(type(lines)) == \"<class 'numpy.ndarray'>\":\n left_lines, right_lines = split_left_right(lines, 352, 288)\n frame_lines = np.copy(frame)\n if left_lines.size != 0:\n for line in left_lines:\n x1, y1, x2, y2 = line[0]\n cv2.line(frame_lines, (x1, y1), (x2, y2), (0, 255, \n 0), 3)\n left_lane = np.mean(left_lines, axis=0, dtype=np.int32)\n x1, y1, x2, y2 = left_lane[0]\n cv2.line(frame_lines, (x1, y1), (x2, y2), (0, 0, 255), 6)\n if right_lines.size != 0:\n for line in right_lines:\n x1, y1, x2, y2 = line[0]\n cv2.line(frame_lines, (x1, y1), (x2, y2), (255, 0, \n 0), 3)\n right_lane = np.mean(right_lines, axis=0, dtype=np.int32)\n x1, y1, x2, y2 = right_lane[0]\n cv2.line(frame_lines, (x1, y1), (x2, y2), (0, 0, 255), 6)\n return frame, frame_lines, roi_frame, left_lane, right_lane\n return func_wrapper\n return inner_decorator\n\n\n@dataclass\nclass Video:\n \"\"\"\n Dataclass to manage video input with\n src = 0 -> webcam\n \"\"\"\n src: int\n width: float\n height: float\n cap: object = field(init=False)\n\n def __post_init__(self):\n self.cap = cv2.VideoCapture(self.src)\n self.cap.set(3, self.width)\n self.cap.set(4, self.height)\n\n @lane_detection('square')\n def get_frame(self):\n ret, frame = self.cap.read()\n if ret:\n return frame\n\n\nif __name__ == '__main__':\n video = Video(0, 352, 288)\n while True:\n frame, frame_lines, roi_frame, left_lane, right_lane = video.get_frame(\n )\n cv2.imshow('frame', frame)\n cv2.imshow('frame w/ lines', frame_lines)\n cv2.imshow('ROI frame', roi_frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n",
"<import token>\n\n\ndef split_left_right(array, frame_width, frame_height):\n \"\"\" helper func to distinguish lines marking left and right lanes \"\"\"\n left_lines, right_lines = [], []\n for _, item in enumerate(array):\n if 0 <= item[0, 0] <= 0.2 * frame_width and 0 <= item[0, 2\n ] <= 0.6 * frame_width:\n left_lines.append(item)\n elif 0.4 * frame_width <= item[0, 0\n ] <= frame_width and 0.6 * frame_width <= item[0, 2\n ] <= frame_width:\n right_lines.append(item)\n return np.array(left_lines), np.array(right_lines)\n\n\ndef get_laneangle(lane):\n \"\"\" helper func to calc langeangle in degrees \"\"\"\n x1, y1, x2, y2 = lane[0]\n m = (y2 - y1) / (x2 - x1)\n angle = np.arctan(m)\n return np.degrees(angle)\n\n\ndef lane_detection(roi_shape='square'):\n \"\"\" decorator to detect lanes in video frame \"\"\"\n\n def inner_decorator(func):\n\n @wraps(func)\n def func_wrapper(*args, **kwargs):\n frame = func(*args, **kwargs)\n left_lane, right_lane = np.array([]), np.array([])\n white = np.ones((288, 352, 1), dtype=np.uint8) * 255\n if roi_shape == 'square':\n roi = np.array([[0, frame.shape[0] // 2], [frame.shape[1], \n frame.shape[0] // 2], [frame.shape[1], frame.shape[0]],\n [0, frame.shape[0]]])\n else:\n roi = np.array([[0, 288], [0, 230], [88, 130], [264, 130],\n [352, 230], [352, 288]])\n stencil = cv2.fillConvexPoly(white, roi, 0)\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n frame_binary = cv2.threshold(frame_gray, 80, 255, cv2.THRESH_BINARY\n )[1]\n roi_frame = cv2.add(frame_binary, stencil)\n lines = cv2.HoughLinesP(cv2.bitwise_not(roi_frame), 1, theta=np\n .pi / 180, threshold=30, minLineLength=80, maxLineGap=50)\n if str(type(lines)) == \"<class 'numpy.ndarray'>\":\n left_lines, right_lines = split_left_right(lines, 352, 288)\n frame_lines = np.copy(frame)\n if left_lines.size != 0:\n for line in left_lines:\n x1, y1, x2, y2 = line[0]\n cv2.line(frame_lines, (x1, y1), (x2, y2), (0, 255, \n 0), 3)\n left_lane = np.mean(left_lines, axis=0, dtype=np.int32)\n x1, y1, x2, y2 = left_lane[0]\n cv2.line(frame_lines, (x1, y1), (x2, y2), (0, 0, 255), 6)\n if right_lines.size != 0:\n for line in right_lines:\n x1, y1, x2, y2 = line[0]\n cv2.line(frame_lines, (x1, y1), (x2, y2), (255, 0, \n 0), 3)\n right_lane = np.mean(right_lines, axis=0, dtype=np.int32)\n x1, y1, x2, y2 = right_lane[0]\n cv2.line(frame_lines, (x1, y1), (x2, y2), (0, 0, 255), 6)\n return frame, frame_lines, roi_frame, left_lane, right_lane\n return func_wrapper\n return inner_decorator\n\n\n@dataclass\nclass Video:\n \"\"\"\n Dataclass to manage video input with\n src = 0 -> webcam\n \"\"\"\n src: int\n width: float\n height: float\n cap: object = field(init=False)\n\n def __post_init__(self):\n self.cap = cv2.VideoCapture(self.src)\n self.cap.set(3, self.width)\n self.cap.set(4, self.height)\n\n @lane_detection('square')\n def get_frame(self):\n ret, frame = self.cap.read()\n if ret:\n return frame\n\n\n<code token>\n",
"<import token>\n<function token>\n\n\ndef get_laneangle(lane):\n \"\"\" helper func to calc langeangle in degrees \"\"\"\n x1, y1, x2, y2 = lane[0]\n m = (y2 - y1) / (x2 - x1)\n angle = np.arctan(m)\n return np.degrees(angle)\n\n\ndef lane_detection(roi_shape='square'):\n \"\"\" decorator to detect lanes in video frame \"\"\"\n\n def inner_decorator(func):\n\n @wraps(func)\n def func_wrapper(*args, **kwargs):\n frame = func(*args, **kwargs)\n left_lane, right_lane = np.array([]), np.array([])\n white = np.ones((288, 352, 1), dtype=np.uint8) * 255\n if roi_shape == 'square':\n roi = np.array([[0, frame.shape[0] // 2], [frame.shape[1], \n frame.shape[0] // 2], [frame.shape[1], frame.shape[0]],\n [0, frame.shape[0]]])\n else:\n roi = np.array([[0, 288], [0, 230], [88, 130], [264, 130],\n [352, 230], [352, 288]])\n stencil = cv2.fillConvexPoly(white, roi, 0)\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n frame_binary = cv2.threshold(frame_gray, 80, 255, cv2.THRESH_BINARY\n )[1]\n roi_frame = cv2.add(frame_binary, stencil)\n lines = cv2.HoughLinesP(cv2.bitwise_not(roi_frame), 1, theta=np\n .pi / 180, threshold=30, minLineLength=80, maxLineGap=50)\n if str(type(lines)) == \"<class 'numpy.ndarray'>\":\n left_lines, right_lines = split_left_right(lines, 352, 288)\n frame_lines = np.copy(frame)\n if left_lines.size != 0:\n for line in left_lines:\n x1, y1, x2, y2 = line[0]\n cv2.line(frame_lines, (x1, y1), (x2, y2), (0, 255, \n 0), 3)\n left_lane = np.mean(left_lines, axis=0, dtype=np.int32)\n x1, y1, x2, y2 = left_lane[0]\n cv2.line(frame_lines, (x1, y1), (x2, y2), (0, 0, 255), 6)\n if right_lines.size != 0:\n for line in right_lines:\n x1, y1, x2, y2 = line[0]\n cv2.line(frame_lines, (x1, y1), (x2, y2), (255, 0, \n 0), 3)\n right_lane = np.mean(right_lines, axis=0, dtype=np.int32)\n x1, y1, x2, y2 = right_lane[0]\n cv2.line(frame_lines, (x1, y1), (x2, y2), (0, 0, 255), 6)\n return frame, frame_lines, roi_frame, left_lane, right_lane\n return func_wrapper\n return inner_decorator\n\n\n@dataclass\nclass Video:\n \"\"\"\n Dataclass to manage video input with\n src = 0 -> webcam\n \"\"\"\n src: int\n width: float\n height: float\n cap: object = field(init=False)\n\n def __post_init__(self):\n self.cap = cv2.VideoCapture(self.src)\n self.cap.set(3, self.width)\n self.cap.set(4, self.height)\n\n @lane_detection('square')\n def get_frame(self):\n ret, frame = self.cap.read()\n if ret:\n return frame\n\n\n<code token>\n",
"<import token>\n<function token>\n\n\ndef get_laneangle(lane):\n \"\"\" helper func to calc langeangle in degrees \"\"\"\n x1, y1, x2, y2 = lane[0]\n m = (y2 - y1) / (x2 - x1)\n angle = np.arctan(m)\n return np.degrees(angle)\n\n\n<function token>\n\n\n@dataclass\nclass Video:\n \"\"\"\n Dataclass to manage video input with\n src = 0 -> webcam\n \"\"\"\n src: int\n width: float\n height: float\n cap: object = field(init=False)\n\n def __post_init__(self):\n self.cap = cv2.VideoCapture(self.src)\n self.cap.set(3, self.width)\n self.cap.set(4, self.height)\n\n @lane_detection('square')\n def get_frame(self):\n ret, frame = self.cap.read()\n if ret:\n return frame\n\n\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\n@dataclass\nclass Video:\n \"\"\"\n Dataclass to manage video input with\n src = 0 -> webcam\n \"\"\"\n src: int\n width: float\n height: float\n cap: object = field(init=False)\n\n def __post_init__(self):\n self.cap = cv2.VideoCapture(self.src)\n self.cap.set(3, self.width)\n self.cap.set(4, self.height)\n\n @lane_detection('square')\n def get_frame(self):\n ret, frame = self.cap.read()\n if ret:\n return frame\n\n\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\n@dataclass\nclass Video:\n <docstring token>\n src: int\n width: float\n height: float\n cap: object = field(init=False)\n\n def __post_init__(self):\n self.cap = cv2.VideoCapture(self.src)\n self.cap.set(3, self.width)\n self.cap.set(4, self.height)\n\n @lane_detection('square')\n def get_frame(self):\n ret, frame = self.cap.read()\n if ret:\n return frame\n\n\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\n@dataclass\nclass Video:\n <docstring token>\n src: int\n width: float\n height: float\n cap: object = field(init=False)\n <function token>\n\n @lane_detection('square')\n def get_frame(self):\n ret, frame = self.cap.read()\n if ret:\n return frame\n\n\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\n@dataclass\nclass Video:\n <docstring token>\n src: int\n width: float\n height: float\n cap: object = field(init=False)\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<code token>\n"
] | false |
98,607 |
ea6c880eb305428b1eefd905dedf1f9cb575bf4d
|
from flask_restx import Resource, fields
from flask import request
import flask
import os
from flask import send_file
import requests
import io
import json
import uuid
from main.booking import ns
roomInfo = [
{
"floor": 1,
"rooms" : [
{"id": 1, "name" : "A"},
{"id": 2, "name" : "B"},
{"id": 3, "name" : "C"},
{"id": 4, "name" : "D"},
{"id": 5, "name" : "E"}
]
},
{
"floor": 2,
"rooms" : [
{"id": 1, "name" : "A"},
{"id": 2, "name" : "B"},
{"id": 3, "name" : "C"},
{"id": 4, "name" : "D"},
{"id": 5, "name" : "E"}
]
},
{
"floor": 3,
"rooms" : [
{"id": 1, "name" : "A"},
{"id": 2, "name" : "B"},
{"id": 3, "name" : "C"},
{"id": 4, "name" : "D"},
{"id": 5, "name" : "E"}
]
},
{
"floor": 4,
"rooms" : [
{"id": 1, "name" : "A"},
{"id": 2, "name" : "B"},
{"id": 3, "name" : "C"},
{"id": 4, "name" : "D"},
{"id": 5, "name" : "E"}
]
},
]
roomStatus = [
{"id" : 1, "status" : "Available", "color": "Green"},
{"id" : 2, "status" : "Occupied", "color": "Red"},
{"id" : 3, "status" : "Vacant", "color": "Orange"},
{"id" : 4, "status" : "Repair", "color": "Gray"}
]
# The master JSONs roomInfo and roomStatus above could also be stored in a database or another external data source;
# they are kept inline here for ease of review.
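# A minimal sketch of that idea, assuming a hypothetical "room_config.json"
# file with the same shape as the literals above; it is left commented out so
# the inline defaults remain the source of truth:
#
# with open("room_config.json") as fp:
#     _config = json.load(fp)
#     roomInfo = _config["roomInfo"]
#     roomStatus = _config["roomStatus"]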
roomUpdates = []
# For this program every update is kept in the local roomUpdates list; the same CRUD operations could instead be done
# against a database. Keep the server running for the whole testing cycle: the state lives in memory, so restarting
# the server resets it and all testing operations have to be repeated from the start.
@ns.route('/checkin/<int:floor>/<int:room>', methods=['POST'])
class Checkin(Resource):
# @ns.marshal_with(booking, envelope='booking')
def post(self, floor,room):
try:
if(any(int(x["floor"]) == int(floor) and int(x["room"]) == int(room) for x in roomUpdates)):
return {
'statusCode': 200,
'desc': "Room not Available",
'bookingStatus': 0
}
else:
if(len([x for i,x in enumerate(roomInfo) if int(x["floor"]) == int(floor)]) and int(room) <= 5):
roomUpdates.append({"id": uuid.uuid4(), "floor": int(floor), "room": int(room), "status": 2})
return {
'statusCode': 200,
'desc': "Occupied = Floor : " + str(floor) + ", Room : " + str(room),
'bookingStatus': 1
}
else:
return {
'statusCode': 200,
'desc': "Invalid Floor and Room Number",
'bookingStatus': 2
}
except ValueError:
return {
'statusCode': 500,
'desc': "Error in Checkin. Pleaes try again after some time...",
'bookingStatus': -1
}
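# Example check-in call (a sketch: host, port and any namespace prefix depend
# on how `ns` is mounted in main.booking and are assumptions here):
#   curl -X POST http://localhost:5000/checkin/2/3
# The first call for floor 2, room 3 responds with
#   {"statusCode": 200, "desc": "Occupied = Floor : 2, Room : 3", "bookingStatus": 1}
# and a repeated call for the same room returns bookingStatus 0 ("Room not Available").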
@ns.route('/checkout/<int:floor>/<int:room>', methods=['POST'])
class Checkout(Resource):
# @ns.marshal_with(booking, envelope='booking')
def post(self, floor,room):
try:
if(any(x["floor"] == floor and x["room"] == room and x["status"] == 2 for x in roomUpdates)):
indexes = [i for i,x in enumerate(roomUpdates) if x["floor"] == floor and x["room"] == room]
del roomUpdates[indexes[0]]
roomUpdates.append({"floor": int(floor), "room": int(room), "status": 3})
return {
'statusCode': 200,
'desc': "Vacant",
'bookingStatus': 1
}
else:
return {
'statusCode': 200,
'desc': "Cannot checkout. Because Room is not occupied",
'bookingStatus': 0
}
except ValueError:
return {
'statusCode': 500,
'desc': "Error in Checkin. Pleaes try again after some time...",
'bookingStatus': -1
}
@ns.route('/cleaned/<int:floor>/<int:room>', methods=['POST'])
class Cleaned(Resource):
# @ns.marshal_with(booking, envelope='booking')
def post(self, floor,room):
try:
if(any(x["floor"] == floor and x["room"] == room and x["status"] == 3 for x in roomUpdates)):
indexes = [i for i,x in enumerate(roomUpdates) if x["floor"] == floor and x["room"] == room]
del roomUpdates[indexes[0]]
return {
'statusCode': 200,
'desc': "Available",
'bookingStatus': 1
}
else:
return {
'statusCode': 200,
'desc': "Cannot clean. Because Room is not vacant",
'bookingStatus': 1
}
except ValueError:
return {
'statusCode': 500,
'desc': "Error in Checkin. Pleaes try again after some time...",
'bookingStatus': -1
}
@ns.route('/mark-repair/<int:floor>/<int:room>', methods=['POST'])
class Markrepair(Resource):
# @ns.marshal_with(booking, envelope='booking')
def post(self, floor,room):
try:
if(any(x["floor"] == floor and x["room"] == room and x["status"] == 3 for x in roomUpdates)):
indexes = [i for i,x in enumerate(roomUpdates) if x["floor"] == floor and x["room"] == room]
del roomUpdates[indexes[0]]
roomUpdates.append({"floor": int(floor), "room": int(room), "status": 4})
return {
'statusCode': 200,
'desc': "Taken for Repair",
'bookingStatus': 1
}
else:
return {
'statusCode': 200,
'desc': "Cannot take for repair. Because Room is not vacant",
'bookingStatus': 1
}
except ValueError:
return {
'statusCode': 500,
'desc': "Error in Checkin. Pleaes try again after some time...",
'bookingStatus': -1
}
@ns.route('/completed-repair/<int:floor>/<int:room>', methods=['POST'])
class Completedrepair(Resource):
# @ns.marshal_with(booking, envelope='booking')
def post(self, floor,room):
try:
if(any(x["floor"] == floor and x["room"] == room and x["status"] == 4 for x in roomUpdates)):
indexes = [i for i,x in enumerate(roomUpdates) if x["floor"] == floor and x["room"] == room]
del roomUpdates[indexes[0]]
roomUpdates.append({"floor": int(floor), "room": int(room) ,"status": 3})
return {
'statusCode': 200,
'desc': "Repair Completed",
'bookingStatus': 1
}
else:
return {
'statusCode': 200,
'desc': "Cannot mark repair completed. Because Room is not taken for repair",
'bookingStatus': 1
}
except ValueError:
return {
'statusCode': 500,
'desc': "Error in Checkin. Pleaes try again after some time...",
'bookingStatus': -1
}
@ns.route('/available/', methods=['GET'])
class RoomsAvailable(Resource):
# @ns.marshal_with(booking, envelope='booking')
    def get(self):
        try:
            available = []
            for floor in roomInfo:
                for room in floor["rooms"]:
                    if not any(int(x["floor"]) == floor["floor"] and int(x["room"]) == room["id"] for x in roomUpdates):
                        available.append(str(floor["floor"]) + "-" + str(room["id"]))
            response = dict()
            response['available'] = available
            return {
                'statusCode': 200,
                'desc': "Available Rooms",
                'response': response
            }
        except ValueError:
            return {
                'statusCode': 500,
                'desc': "Error while listing available rooms. Please try again after some time...",
                'bookingStatus': -1
            }
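# Example response from GET /available/ on a freshly started server, before
# any check-ins (4 floors x 5 rooms; the list is truncated here for brevity):
#   {"statusCode": 200, "desc": "Available Rooms",
#    "response": {"available": ["1-1", "1-2", "1-3", "...", "4-5"]}}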
|
[
"from flask_restx import Resource, fields\nfrom flask import request\n\nimport flask\nimport os\nfrom flask import request\nfrom flask import send_file\nimport requests\nimport io\nimport os\nimport json\nimport uuid\n\nfrom main.booking import ns\n\n\nroomInfo = [\n {\n \"floor\": 1,\n \"rooms\" : [\n {\"id\": 1, \"name\" : \"A\"},\n {\"id\": 2, \"name\" : \"B\"},\n {\"id\": 3, \"name\" : \"C\"},\n {\"id\": 4, \"name\" : \"D\"},\n {\"id\": 5, \"name\" : \"E\"}\n ]\n },\n {\n \"floor\": 2,\n \"rooms\" : [\n {\"id\": 1, \"name\" : \"A\"},\n {\"id\": 2, \"name\" : \"B\"},\n {\"id\": 3, \"name\" : \"C\"},\n {\"id\": 4, \"name\" : \"D\"},\n {\"id\": 5, \"name\" : \"E\"}\n ]\n },\n {\n \"floor\": 3,\n \"rooms\" : [\n {\"id\": 1, \"name\" : \"A\"},\n {\"id\": 2, \"name\" : \"B\"},\n {\"id\": 3, \"name\" : \"C\"},\n {\"id\": 4, \"name\" : \"D\"},\n {\"id\": 5, \"name\" : \"E\"}\n ]\n },\n {\n \"floor\": 4,\n \"rooms\" : [\n {\"id\": 1, \"name\" : \"A\"},\n {\"id\": 2, \"name\" : \"B\"},\n {\"id\": 3, \"name\" : \"C\"},\n {\"id\": 4, \"name\" : \"D\"},\n {\"id\": 5, \"name\" : \"E\"}\n ]\n },\n]\n\n\n\nroomStatus = [\n {\"id\" : 1, \"status\" : \"Available\", \"color\": \"Green\"},\n {\"id\" : 2, \"status\" : \"Occupied\", \"color\": \"Red\"},\n {\"id\" : 3, \"status\" : \"Vacant\", \"color\": \"Orange\"},\n {\"id\" : 4, \"status\" : \"Repair\", \"color\": \"Gray\"}\n]\n\n\n# Above Master JSONs roomInfo and roomStatus can also be stored in any databases or External Data soruces,\n# For ease of view i have placed here\n\nroomUpdates = []\n\n# For this program i am updating all the values in local array. We can also do CRUD operations in database. Please\n# maintain the server running during all testing cycles. If server restarted all the testing operations needs to be done\n# from start\n\n\[email protected]('/checkin/<int:floor>/<int:room>', methods=['POST'])\nclass Checkin(Resource):\n # @ns.marshal_with(booking, envelope='booking')\n def post(self, floor,room):\n try:\n if(any(int(x[\"floor\"]) == int(floor) and int(x[\"room\"]) == int(room) for x in roomUpdates)):\n return {\n 'statusCode': 200,\n 'desc': \"Room not Available\",\n 'bookingStatus': 0\n }\n else:\n if(len([x for i,x in enumerate(roomInfo) if int(x[\"floor\"]) == int(floor)]) and int(room) <= 5):\n roomUpdates.append({\"id\": uuid.uuid4(), \"floor\": int(floor), \"room\": int(room), \"status\": 2})\n return {\n 'statusCode': 200,\n 'desc': \"Occupied = Floor : \" + str(floor) + \", Room : \" + str(room),\n 'bookingStatus': 1\n }\n else:\n return {\n 'statusCode': 200,\n 'desc': \"Invalid Floor and Room Number\",\n 'bookingStatus': 2\n }\n except ValueError:\n return {\n 'statusCode': 500,\n 'desc': \"Error in Checkin. Pleaes try again after some time...\",\n 'bookingStatus': -1\n }\n\n\n\n\n\[email protected]('/checkout/<int:floor>/<int:room>', methods=['POST'])\nclass Checkout(Resource):\n # @ns.marshal_with(booking, envelope='booking')\n def post(self, floor,room):\n try:\n if(any(x[\"floor\"] == floor and x[\"room\"] == room and x[\"status\"] == 2 for x in roomUpdates)):\n indexes = [i for i,x in enumerate(roomUpdates) if x[\"floor\"] == floor and x[\"room\"] == room]\n del roomUpdates[indexes[0]]\n roomUpdates.append({\"floor\": int(floor), \"room\": int(room), \"status\": 3})\n return {\n 'statusCode': 200,\n 'desc': \"Vacant\",\n 'bookingStatus': 1\n }\n else:\n return {\n 'statusCode': 200,\n 'desc': \"Cannot checkout. 
Because Room is not occupied\",\n 'bookingStatus': 0\n }\n except ValueError:\n return {\n 'statusCode': 500,\n 'desc': \"Error in Checkin. Pleaes try again after some time...\",\n 'bookingStatus': -1\n }\n\n\[email protected]('/cleaned/<int:floor>/<int:room>', methods=['POST'])\nclass Cleaned(Resource):\n # @ns.marshal_with(booking, envelope='booking')\n def post(self, floor,room):\n try:\n if(any(x[\"floor\"] == floor and x[\"room\"] == room and x[\"status\"] == 3 for x in roomUpdates)):\n indexes = [i for i,x in enumerate(roomUpdates) if x[\"floor\"] == floor and x[\"room\"] == room]\n del roomUpdates[indexes[0]]\n return {\n 'statusCode': 200,\n 'desc': \"Available\",\n 'bookingStatus': 1\n }\n else:\n return {\n 'statusCode': 200,\n 'desc': \"Cannot clean. Because Room is not vacant\",\n 'bookingStatus': 1\n }\n except ValueError:\n return {\n 'statusCode': 500,\n 'desc': \"Error in Checkin. Pleaes try again after some time...\",\n 'bookingStatus': -1\n }\n\n\[email protected]('/mark-repair/<int:floor>/<int:room>', methods=['POST'])\nclass Markrepair(Resource):\n # @ns.marshal_with(booking, envelope='booking')\n def post(self, floor,room):\n try:\n if(any(x[\"floor\"] == floor and x[\"room\"] == room and x[\"status\"] == 3 for x in roomUpdates)):\n indexes = [i for i,x in enumerate(roomUpdates) if x[\"floor\"] == floor and x[\"room\"] == room]\n del roomUpdates[indexes[0]]\n roomUpdates.append({\"floor\": int(floor), \"room\": int(room), \"status\": 4})\n return {\n 'statusCode': 200,\n 'desc': \"Taken for Repair\",\n 'bookingStatus': 1\n }\n else:\n return {\n 'statusCode': 200,\n 'desc': \"Cannot take for repair. Because Room is not vacant\",\n 'bookingStatus': 1\n }\n except ValueError:\n return {\n 'statusCode': 500,\n 'desc': \"Error in Checkin. Pleaes try again after some time...\",\n 'bookingStatus': -1\n }\n\[email protected]('/completed-repair/<int:floor>/<int:room>', methods=['POST'])\nclass Completedrepair(Resource):\n # @ns.marshal_with(booking, envelope='booking')\n def post(self, floor,room):\n try:\n if(any(x[\"floor\"] == floor and x[\"room\"] == room and x[\"status\"] == 4 for x in roomUpdates)):\n indexes = [i for i,x in enumerate(roomUpdates) if x[\"floor\"] == floor and x[\"room\"] == room]\n del roomUpdates[indexes[0]]\n roomUpdates.append({\"floor\": int(floor), \"room\": int(room) ,\"status\": 3})\n return {\n 'statusCode': 200,\n 'desc': \"Repair Completed\",\n 'bookingStatus': 1\n }\n else:\n return {\n 'statusCode': 200,\n 'desc': \"Cannot mark repair completed. Because Room is not taken for repair\",\n 'bookingStatus': 1\n }\n except ValueError:\n return {\n 'statusCode': 500,\n 'desc': \"Error in Checkin. Pleaes try again after some time...\",\n 'bookingStatus': -1\n }\n\n\n\[email protected]('/available/', methods=['GET'])\nclass RoomsAvailable(Resource):\n # @ns.marshal_with(booking, envelope='booking')\n def get(self):\n try:\n availabile = []\n for idxFloor,floor in enumerate(roomInfo):\n for idxRoom,room in enumerate(floor[\"rooms\"]):\n if(any(int(x[\"floor\"]) == floor[\"floor\"] and int(x[\"room\"]) == room[\"id\"] for x in roomUpdates) == False):\n availabile.append(str(floor[\"floor\"]) + \"-\" + str(room[\"id\"]))\n if(idxFloor == len(roomInfo) - 1 and idxRoom == len(floor[\"rooms\"]) - 1):\n response = dict();\n response['availabile'] = availabile\n return {\n 'statusCode': 200,\n 'desc': \"Available Rooms\",\n 'response': response\n }\n except ValueError:\n return {\n 'statusCode': 500,\n 'desc': \"Error in Checkin. 
Pleaes try again after some time...\",\n 'bookingStatus': -1\n }\n",
"from flask_restx import Resource, fields\nfrom flask import request\nimport flask\nimport os\nfrom flask import request\nfrom flask import send_file\nimport requests\nimport io\nimport os\nimport json\nimport uuid\nfrom main.booking import ns\nroomInfo = [{'floor': 1, 'rooms': [{'id': 1, 'name': 'A'}, {'id': 2, 'name':\n 'B'}, {'id': 3, 'name': 'C'}, {'id': 4, 'name': 'D'}, {'id': 5, 'name':\n 'E'}]}, {'floor': 2, 'rooms': [{'id': 1, 'name': 'A'}, {'id': 2, 'name':\n 'B'}, {'id': 3, 'name': 'C'}, {'id': 4, 'name': 'D'}, {'id': 5, 'name':\n 'E'}]}, {'floor': 3, 'rooms': [{'id': 1, 'name': 'A'}, {'id': 2, 'name':\n 'B'}, {'id': 3, 'name': 'C'}, {'id': 4, 'name': 'D'}, {'id': 5, 'name':\n 'E'}]}, {'floor': 4, 'rooms': [{'id': 1, 'name': 'A'}, {'id': 2, 'name':\n 'B'}, {'id': 3, 'name': 'C'}, {'id': 4, 'name': 'D'}, {'id': 5, 'name':\n 'E'}]}]\nroomStatus = [{'id': 1, 'status': 'Available', 'color': 'Green'}, {'id': 2,\n 'status': 'Occupied', 'color': 'Red'}, {'id': 3, 'status': 'Vacant',\n 'color': 'Orange'}, {'id': 4, 'status': 'Repair', 'color': 'Gray'}]\nroomUpdates = []\n\n\[email protected]('/checkin/<int:floor>/<int:room>', methods=['POST'])\nclass Checkin(Resource):\n\n def post(self, floor, room):\n try:\n if any(int(x['floor']) == int(floor) and int(x['room']) == int(\n room) for x in roomUpdates):\n return {'statusCode': 200, 'desc': 'Room not Available',\n 'bookingStatus': 0}\n elif len([x for i, x in enumerate(roomInfo) if int(x['floor']) ==\n int(floor)]) and int(room) <= 5:\n roomUpdates.append({'id': uuid.uuid4(), 'floor': int(floor),\n 'room': int(room), 'status': 2})\n return {'statusCode': 200, 'desc': 'Occupied = Floor : ' +\n str(floor) + ', Room : ' + str(room), 'bookingStatus': 1}\n else:\n return {'statusCode': 200, 'desc':\n 'Invalid Floor and Room Number', 'bookingStatus': 2}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/checkout/<int:floor>/<int:room>', methods=['POST'])\nclass Checkout(Resource):\n\n def post(self, floor, room):\n try:\n if any(x['floor'] == floor and x['room'] == room and x['status'\n ] == 2 for x in roomUpdates):\n indexes = [i for i, x in enumerate(roomUpdates) if x[\n 'floor'] == floor and x['room'] == room]\n del roomUpdates[indexes[0]]\n roomUpdates.append({'floor': int(floor), 'room': int(room),\n 'status': 3})\n return {'statusCode': 200, 'desc': 'Vacant', 'bookingStatus': 1\n }\n else:\n return {'statusCode': 200, 'desc':\n 'Cannot checkout. Because Room is not occupied',\n 'bookingStatus': 0}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/cleaned/<int:floor>/<int:room>', methods=['POST'])\nclass Cleaned(Resource):\n\n def post(self, floor, room):\n try:\n if any(x['floor'] == floor and x['room'] == room and x['status'\n ] == 3 for x in roomUpdates):\n indexes = [i for i, x in enumerate(roomUpdates) if x[\n 'floor'] == floor and x['room'] == room]\n del roomUpdates[indexes[0]]\n return {'statusCode': 200, 'desc': 'Available',\n 'bookingStatus': 1}\n else:\n return {'statusCode': 200, 'desc':\n 'Cannot clean. Because Room is not vacant',\n 'bookingStatus': 1}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. 
Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/mark-repair/<int:floor>/<int:room>', methods=['POST'])\nclass Markrepair(Resource):\n\n def post(self, floor, room):\n try:\n if any(x['floor'] == floor and x['room'] == room and x['status'\n ] == 3 for x in roomUpdates):\n indexes = [i for i, x in enumerate(roomUpdates) if x[\n 'floor'] == floor and x['room'] == room]\n del roomUpdates[indexes[0]]\n roomUpdates.append({'floor': int(floor), 'room': int(room),\n 'status': 4})\n return {'statusCode': 200, 'desc': 'Taken for Repair',\n 'bookingStatus': 1}\n else:\n return {'statusCode': 200, 'desc':\n 'Cannot take for repair. Because Room is not vacant',\n 'bookingStatus': 1}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/completed-repair/<int:floor>/<int:room>', methods=['POST'])\nclass Completedrepair(Resource):\n\n def post(self, floor, room):\n try:\n if any(x['floor'] == floor and x['room'] == room and x['status'\n ] == 4 for x in roomUpdates):\n indexes = [i for i, x in enumerate(roomUpdates) if x[\n 'floor'] == floor and x['room'] == room]\n del roomUpdates[indexes[0]]\n roomUpdates.append({'floor': int(floor), 'room': int(room),\n 'status': 3})\n return {'statusCode': 200, 'desc': 'Repair Completed',\n 'bookingStatus': 1}\n else:\n return {'statusCode': 200, 'desc':\n 'Cannot mark repair completed. Because Room is not taken for repair'\n , 'bookingStatus': 1}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/available/', methods=['GET'])\nclass RoomsAvailable(Resource):\n\n def get(self):\n try:\n availabile = []\n for idxFloor, floor in enumerate(roomInfo):\n for idxRoom, room in enumerate(floor['rooms']):\n if any(int(x['floor']) == floor['floor'] and int(x[\n 'room']) == room['id'] for x in roomUpdates) == False:\n availabile.append(str(floor['floor']) + '-' + str(\n room['id']))\n if idxFloor == len(roomInfo) - 1 and idxRoom == len(floor\n ['rooms']) - 1:\n response = dict()\n response['availabile'] = availabile\n return {'statusCode': 200, 'desc':\n 'Available Rooms', 'response': response}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n",
"<import token>\nroomInfo = [{'floor': 1, 'rooms': [{'id': 1, 'name': 'A'}, {'id': 2, 'name':\n 'B'}, {'id': 3, 'name': 'C'}, {'id': 4, 'name': 'D'}, {'id': 5, 'name':\n 'E'}]}, {'floor': 2, 'rooms': [{'id': 1, 'name': 'A'}, {'id': 2, 'name':\n 'B'}, {'id': 3, 'name': 'C'}, {'id': 4, 'name': 'D'}, {'id': 5, 'name':\n 'E'}]}, {'floor': 3, 'rooms': [{'id': 1, 'name': 'A'}, {'id': 2, 'name':\n 'B'}, {'id': 3, 'name': 'C'}, {'id': 4, 'name': 'D'}, {'id': 5, 'name':\n 'E'}]}, {'floor': 4, 'rooms': [{'id': 1, 'name': 'A'}, {'id': 2, 'name':\n 'B'}, {'id': 3, 'name': 'C'}, {'id': 4, 'name': 'D'}, {'id': 5, 'name':\n 'E'}]}]\nroomStatus = [{'id': 1, 'status': 'Available', 'color': 'Green'}, {'id': 2,\n 'status': 'Occupied', 'color': 'Red'}, {'id': 3, 'status': 'Vacant',\n 'color': 'Orange'}, {'id': 4, 'status': 'Repair', 'color': 'Gray'}]\nroomUpdates = []\n\n\[email protected]('/checkin/<int:floor>/<int:room>', methods=['POST'])\nclass Checkin(Resource):\n\n def post(self, floor, room):\n try:\n if any(int(x['floor']) == int(floor) and int(x['room']) == int(\n room) for x in roomUpdates):\n return {'statusCode': 200, 'desc': 'Room not Available',\n 'bookingStatus': 0}\n elif len([x for i, x in enumerate(roomInfo) if int(x['floor']) ==\n int(floor)]) and int(room) <= 5:\n roomUpdates.append({'id': uuid.uuid4(), 'floor': int(floor),\n 'room': int(room), 'status': 2})\n return {'statusCode': 200, 'desc': 'Occupied = Floor : ' +\n str(floor) + ', Room : ' + str(room), 'bookingStatus': 1}\n else:\n return {'statusCode': 200, 'desc':\n 'Invalid Floor and Room Number', 'bookingStatus': 2}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/checkout/<int:floor>/<int:room>', methods=['POST'])\nclass Checkout(Resource):\n\n def post(self, floor, room):\n try:\n if any(x['floor'] == floor and x['room'] == room and x['status'\n ] == 2 for x in roomUpdates):\n indexes = [i for i, x in enumerate(roomUpdates) if x[\n 'floor'] == floor and x['room'] == room]\n del roomUpdates[indexes[0]]\n roomUpdates.append({'floor': int(floor), 'room': int(room),\n 'status': 3})\n return {'statusCode': 200, 'desc': 'Vacant', 'bookingStatus': 1\n }\n else:\n return {'statusCode': 200, 'desc':\n 'Cannot checkout. Because Room is not occupied',\n 'bookingStatus': 0}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/cleaned/<int:floor>/<int:room>', methods=['POST'])\nclass Cleaned(Resource):\n\n def post(self, floor, room):\n try:\n if any(x['floor'] == floor and x['room'] == room and x['status'\n ] == 3 for x in roomUpdates):\n indexes = [i for i, x in enumerate(roomUpdates) if x[\n 'floor'] == floor and x['room'] == room]\n del roomUpdates[indexes[0]]\n return {'statusCode': 200, 'desc': 'Available',\n 'bookingStatus': 1}\n else:\n return {'statusCode': 200, 'desc':\n 'Cannot clean. Because Room is not vacant',\n 'bookingStatus': 1}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. 
Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/mark-repair/<int:floor>/<int:room>', methods=['POST'])\nclass Markrepair(Resource):\n\n def post(self, floor, room):\n try:\n if any(x['floor'] == floor and x['room'] == room and x['status'\n ] == 3 for x in roomUpdates):\n indexes = [i for i, x in enumerate(roomUpdates) if x[\n 'floor'] == floor and x['room'] == room]\n del roomUpdates[indexes[0]]\n roomUpdates.append({'floor': int(floor), 'room': int(room),\n 'status': 4})\n return {'statusCode': 200, 'desc': 'Taken for Repair',\n 'bookingStatus': 1}\n else:\n return {'statusCode': 200, 'desc':\n 'Cannot take for repair. Because Room is not vacant',\n 'bookingStatus': 1}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/completed-repair/<int:floor>/<int:room>', methods=['POST'])\nclass Completedrepair(Resource):\n\n def post(self, floor, room):\n try:\n if any(x['floor'] == floor and x['room'] == room and x['status'\n ] == 4 for x in roomUpdates):\n indexes = [i for i, x in enumerate(roomUpdates) if x[\n 'floor'] == floor and x['room'] == room]\n del roomUpdates[indexes[0]]\n roomUpdates.append({'floor': int(floor), 'room': int(room),\n 'status': 3})\n return {'statusCode': 200, 'desc': 'Repair Completed',\n 'bookingStatus': 1}\n else:\n return {'statusCode': 200, 'desc':\n 'Cannot mark repair completed. Because Room is not taken for repair'\n , 'bookingStatus': 1}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/available/', methods=['GET'])\nclass RoomsAvailable(Resource):\n\n def get(self):\n try:\n availabile = []\n for idxFloor, floor in enumerate(roomInfo):\n for idxRoom, room in enumerate(floor['rooms']):\n if any(int(x['floor']) == floor['floor'] and int(x[\n 'room']) == room['id'] for x in roomUpdates) == False:\n availabile.append(str(floor['floor']) + '-' + str(\n room['id']))\n if idxFloor == len(roomInfo) - 1 and idxRoom == len(floor\n ['rooms']) - 1:\n response = dict()\n response['availabile'] = availabile\n return {'statusCode': 200, 'desc':\n 'Available Rooms', 'response': response}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n",
"<import token>\n<assignment token>\n\n\[email protected]('/checkin/<int:floor>/<int:room>', methods=['POST'])\nclass Checkin(Resource):\n\n def post(self, floor, room):\n try:\n if any(int(x['floor']) == int(floor) and int(x['room']) == int(\n room) for x in roomUpdates):\n return {'statusCode': 200, 'desc': 'Room not Available',\n 'bookingStatus': 0}\n elif len([x for i, x in enumerate(roomInfo) if int(x['floor']) ==\n int(floor)]) and int(room) <= 5:\n roomUpdates.append({'id': uuid.uuid4(), 'floor': int(floor),\n 'room': int(room), 'status': 2})\n return {'statusCode': 200, 'desc': 'Occupied = Floor : ' +\n str(floor) + ', Room : ' + str(room), 'bookingStatus': 1}\n else:\n return {'statusCode': 200, 'desc':\n 'Invalid Floor and Room Number', 'bookingStatus': 2}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/checkout/<int:floor>/<int:room>', methods=['POST'])\nclass Checkout(Resource):\n\n def post(self, floor, room):\n try:\n if any(x['floor'] == floor and x['room'] == room and x['status'\n ] == 2 for x in roomUpdates):\n indexes = [i for i, x in enumerate(roomUpdates) if x[\n 'floor'] == floor and x['room'] == room]\n del roomUpdates[indexes[0]]\n roomUpdates.append({'floor': int(floor), 'room': int(room),\n 'status': 3})\n return {'statusCode': 200, 'desc': 'Vacant', 'bookingStatus': 1\n }\n else:\n return {'statusCode': 200, 'desc':\n 'Cannot checkout. Because Room is not occupied',\n 'bookingStatus': 0}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/cleaned/<int:floor>/<int:room>', methods=['POST'])\nclass Cleaned(Resource):\n\n def post(self, floor, room):\n try:\n if any(x['floor'] == floor and x['room'] == room and x['status'\n ] == 3 for x in roomUpdates):\n indexes = [i for i, x in enumerate(roomUpdates) if x[\n 'floor'] == floor and x['room'] == room]\n del roomUpdates[indexes[0]]\n return {'statusCode': 200, 'desc': 'Available',\n 'bookingStatus': 1}\n else:\n return {'statusCode': 200, 'desc':\n 'Cannot clean. Because Room is not vacant',\n 'bookingStatus': 1}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/mark-repair/<int:floor>/<int:room>', methods=['POST'])\nclass Markrepair(Resource):\n\n def post(self, floor, room):\n try:\n if any(x['floor'] == floor and x['room'] == room and x['status'\n ] == 3 for x in roomUpdates):\n indexes = [i for i, x in enumerate(roomUpdates) if x[\n 'floor'] == floor and x['room'] == room]\n del roomUpdates[indexes[0]]\n roomUpdates.append({'floor': int(floor), 'room': int(room),\n 'status': 4})\n return {'statusCode': 200, 'desc': 'Taken for Repair',\n 'bookingStatus': 1}\n else:\n return {'statusCode': 200, 'desc':\n 'Cannot take for repair. Because Room is not vacant',\n 'bookingStatus': 1}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. 
Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/completed-repair/<int:floor>/<int:room>', methods=['POST'])\nclass Completedrepair(Resource):\n\n def post(self, floor, room):\n try:\n if any(x['floor'] == floor and x['room'] == room and x['status'\n ] == 4 for x in roomUpdates):\n indexes = [i for i, x in enumerate(roomUpdates) if x[\n 'floor'] == floor and x['room'] == room]\n del roomUpdates[indexes[0]]\n roomUpdates.append({'floor': int(floor), 'room': int(room),\n 'status': 3})\n return {'statusCode': 200, 'desc': 'Repair Completed',\n 'bookingStatus': 1}\n else:\n return {'statusCode': 200, 'desc':\n 'Cannot mark repair completed. Because Room is not taken for repair'\n , 'bookingStatus': 1}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/available/', methods=['GET'])\nclass RoomsAvailable(Resource):\n\n def get(self):\n try:\n availabile = []\n for idxFloor, floor in enumerate(roomInfo):\n for idxRoom, room in enumerate(floor['rooms']):\n if any(int(x['floor']) == floor['floor'] and int(x[\n 'room']) == room['id'] for x in roomUpdates) == False:\n availabile.append(str(floor['floor']) + '-' + str(\n room['id']))\n if idxFloor == len(roomInfo) - 1 and idxRoom == len(floor\n ['rooms']) - 1:\n response = dict()\n response['availabile'] = availabile\n return {'statusCode': 200, 'desc':\n 'Available Rooms', 'response': response}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n",
"<import token>\n<assignment token>\n\n\[email protected]('/checkin/<int:floor>/<int:room>', methods=['POST'])\nclass Checkin(Resource):\n <function token>\n\n\[email protected]('/checkout/<int:floor>/<int:room>', methods=['POST'])\nclass Checkout(Resource):\n\n def post(self, floor, room):\n try:\n if any(x['floor'] == floor and x['room'] == room and x['status'\n ] == 2 for x in roomUpdates):\n indexes = [i for i, x in enumerate(roomUpdates) if x[\n 'floor'] == floor and x['room'] == room]\n del roomUpdates[indexes[0]]\n roomUpdates.append({'floor': int(floor), 'room': int(room),\n 'status': 3})\n return {'statusCode': 200, 'desc': 'Vacant', 'bookingStatus': 1\n }\n else:\n return {'statusCode': 200, 'desc':\n 'Cannot checkout. Because Room is not occupied',\n 'bookingStatus': 0}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/cleaned/<int:floor>/<int:room>', methods=['POST'])\nclass Cleaned(Resource):\n\n def post(self, floor, room):\n try:\n if any(x['floor'] == floor and x['room'] == room and x['status'\n ] == 3 for x in roomUpdates):\n indexes = [i for i, x in enumerate(roomUpdates) if x[\n 'floor'] == floor and x['room'] == room]\n del roomUpdates[indexes[0]]\n return {'statusCode': 200, 'desc': 'Available',\n 'bookingStatus': 1}\n else:\n return {'statusCode': 200, 'desc':\n 'Cannot clean. Because Room is not vacant',\n 'bookingStatus': 1}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/mark-repair/<int:floor>/<int:room>', methods=['POST'])\nclass Markrepair(Resource):\n\n def post(self, floor, room):\n try:\n if any(x['floor'] == floor and x['room'] == room and x['status'\n ] == 3 for x in roomUpdates):\n indexes = [i for i, x in enumerate(roomUpdates) if x[\n 'floor'] == floor and x['room'] == room]\n del roomUpdates[indexes[0]]\n roomUpdates.append({'floor': int(floor), 'room': int(room),\n 'status': 4})\n return {'statusCode': 200, 'desc': 'Taken for Repair',\n 'bookingStatus': 1}\n else:\n return {'statusCode': 200, 'desc':\n 'Cannot take for repair. Because Room is not vacant',\n 'bookingStatus': 1}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/completed-repair/<int:floor>/<int:room>', methods=['POST'])\nclass Completedrepair(Resource):\n\n def post(self, floor, room):\n try:\n if any(x['floor'] == floor and x['room'] == room and x['status'\n ] == 4 for x in roomUpdates):\n indexes = [i for i, x in enumerate(roomUpdates) if x[\n 'floor'] == floor and x['room'] == room]\n del roomUpdates[indexes[0]]\n roomUpdates.append({'floor': int(floor), 'room': int(room),\n 'status': 3})\n return {'statusCode': 200, 'desc': 'Repair Completed',\n 'bookingStatus': 1}\n else:\n return {'statusCode': 200, 'desc':\n 'Cannot mark repair completed. Because Room is not taken for repair'\n , 'bookingStatus': 1}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. 
Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/available/', methods=['GET'])\nclass RoomsAvailable(Resource):\n\n def get(self):\n try:\n availabile = []\n for idxFloor, floor in enumerate(roomInfo):\n for idxRoom, room in enumerate(floor['rooms']):\n if any(int(x['floor']) == floor['floor'] and int(x[\n 'room']) == room['id'] for x in roomUpdates) == False:\n availabile.append(str(floor['floor']) + '-' + str(\n room['id']))\n if idxFloor == len(roomInfo) - 1 and idxRoom == len(floor\n ['rooms']) - 1:\n response = dict()\n response['availabile'] = availabile\n return {'statusCode': 200, 'desc':\n 'Available Rooms', 'response': response}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n",
"<import token>\n<assignment token>\n<class token>\n\n\[email protected]('/checkout/<int:floor>/<int:room>', methods=['POST'])\nclass Checkout(Resource):\n\n def post(self, floor, room):\n try:\n if any(x['floor'] == floor and x['room'] == room and x['status'\n ] == 2 for x in roomUpdates):\n indexes = [i for i, x in enumerate(roomUpdates) if x[\n 'floor'] == floor and x['room'] == room]\n del roomUpdates[indexes[0]]\n roomUpdates.append({'floor': int(floor), 'room': int(room),\n 'status': 3})\n return {'statusCode': 200, 'desc': 'Vacant', 'bookingStatus': 1\n }\n else:\n return {'statusCode': 200, 'desc':\n 'Cannot checkout. Because Room is not occupied',\n 'bookingStatus': 0}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/cleaned/<int:floor>/<int:room>', methods=['POST'])\nclass Cleaned(Resource):\n\n def post(self, floor, room):\n try:\n if any(x['floor'] == floor and x['room'] == room and x['status'\n ] == 3 for x in roomUpdates):\n indexes = [i for i, x in enumerate(roomUpdates) if x[\n 'floor'] == floor and x['room'] == room]\n del roomUpdates[indexes[0]]\n return {'statusCode': 200, 'desc': 'Available',\n 'bookingStatus': 1}\n else:\n return {'statusCode': 200, 'desc':\n 'Cannot clean. Because Room is not vacant',\n 'bookingStatus': 1}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/mark-repair/<int:floor>/<int:room>', methods=['POST'])\nclass Markrepair(Resource):\n\n def post(self, floor, room):\n try:\n if any(x['floor'] == floor and x['room'] == room and x['status'\n ] == 3 for x in roomUpdates):\n indexes = [i for i, x in enumerate(roomUpdates) if x[\n 'floor'] == floor and x['room'] == room]\n del roomUpdates[indexes[0]]\n roomUpdates.append({'floor': int(floor), 'room': int(room),\n 'status': 4})\n return {'statusCode': 200, 'desc': 'Taken for Repair',\n 'bookingStatus': 1}\n else:\n return {'statusCode': 200, 'desc':\n 'Cannot take for repair. Because Room is not vacant',\n 'bookingStatus': 1}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/completed-repair/<int:floor>/<int:room>', methods=['POST'])\nclass Completedrepair(Resource):\n\n def post(self, floor, room):\n try:\n if any(x['floor'] == floor and x['room'] == room and x['status'\n ] == 4 for x in roomUpdates):\n indexes = [i for i, x in enumerate(roomUpdates) if x[\n 'floor'] == floor and x['room'] == room]\n del roomUpdates[indexes[0]]\n roomUpdates.append({'floor': int(floor), 'room': int(room),\n 'status': 3})\n return {'statusCode': 200, 'desc': 'Repair Completed',\n 'bookingStatus': 1}\n else:\n return {'statusCode': 200, 'desc':\n 'Cannot mark repair completed. Because Room is not taken for repair'\n , 'bookingStatus': 1}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. 
Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/available/', methods=['GET'])\nclass RoomsAvailable(Resource):\n\n def get(self):\n try:\n availabile = []\n for idxFloor, floor in enumerate(roomInfo):\n for idxRoom, room in enumerate(floor['rooms']):\n if any(int(x['floor']) == floor['floor'] and int(x[\n 'room']) == room['id'] for x in roomUpdates) == False:\n availabile.append(str(floor['floor']) + '-' + str(\n room['id']))\n if idxFloor == len(roomInfo) - 1 and idxRoom == len(floor\n ['rooms']) - 1:\n response = dict()\n response['availabile'] = availabile\n return {'statusCode': 200, 'desc':\n 'Available Rooms', 'response': response}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n",
"<import token>\n<assignment token>\n<class token>\n\n\[email protected]('/checkout/<int:floor>/<int:room>', methods=['POST'])\nclass Checkout(Resource):\n <function token>\n\n\[email protected]('/cleaned/<int:floor>/<int:room>', methods=['POST'])\nclass Cleaned(Resource):\n\n def post(self, floor, room):\n try:\n if any(x['floor'] == floor and x['room'] == room and x['status'\n ] == 3 for x in roomUpdates):\n indexes = [i for i, x in enumerate(roomUpdates) if x[\n 'floor'] == floor and x['room'] == room]\n del roomUpdates[indexes[0]]\n return {'statusCode': 200, 'desc': 'Available',\n 'bookingStatus': 1}\n else:\n return {'statusCode': 200, 'desc':\n 'Cannot clean. Because Room is not vacant',\n 'bookingStatus': 1}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/mark-repair/<int:floor>/<int:room>', methods=['POST'])\nclass Markrepair(Resource):\n\n def post(self, floor, room):\n try:\n if any(x['floor'] == floor and x['room'] == room and x['status'\n ] == 3 for x in roomUpdates):\n indexes = [i for i, x in enumerate(roomUpdates) if x[\n 'floor'] == floor and x['room'] == room]\n del roomUpdates[indexes[0]]\n roomUpdates.append({'floor': int(floor), 'room': int(room),\n 'status': 4})\n return {'statusCode': 200, 'desc': 'Taken for Repair',\n 'bookingStatus': 1}\n else:\n return {'statusCode': 200, 'desc':\n 'Cannot take for repair. Because Room is not vacant',\n 'bookingStatus': 1}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/completed-repair/<int:floor>/<int:room>', methods=['POST'])\nclass Completedrepair(Resource):\n\n def post(self, floor, room):\n try:\n if any(x['floor'] == floor and x['room'] == room and x['status'\n ] == 4 for x in roomUpdates):\n indexes = [i for i, x in enumerate(roomUpdates) if x[\n 'floor'] == floor and x['room'] == room]\n del roomUpdates[indexes[0]]\n roomUpdates.append({'floor': int(floor), 'room': int(room),\n 'status': 3})\n return {'statusCode': 200, 'desc': 'Repair Completed',\n 'bookingStatus': 1}\n else:\n return {'statusCode': 200, 'desc':\n 'Cannot mark repair completed. Because Room is not taken for repair'\n , 'bookingStatus': 1}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/available/', methods=['GET'])\nclass RoomsAvailable(Resource):\n\n def get(self):\n try:\n availabile = []\n for idxFloor, floor in enumerate(roomInfo):\n for idxRoom, room in enumerate(floor['rooms']):\n if any(int(x['floor']) == floor['floor'] and int(x[\n 'room']) == room['id'] for x in roomUpdates) == False:\n availabile.append(str(floor['floor']) + '-' + str(\n room['id']))\n if idxFloor == len(roomInfo) - 1 and idxRoom == len(floor\n ['rooms']) - 1:\n response = dict()\n response['availabile'] = availabile\n return {'statusCode': 200, 'desc':\n 'Available Rooms', 'response': response}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n\n\[email protected]('/cleaned/<int:floor>/<int:room>', methods=['POST'])\nclass Cleaned(Resource):\n\n def post(self, floor, room):\n try:\n if any(x['floor'] == floor and x['room'] == room and x['status'\n ] == 3 for x in roomUpdates):\n indexes = [i for i, x in enumerate(roomUpdates) if x[\n 'floor'] == floor and x['room'] == room]\n del roomUpdates[indexes[0]]\n return {'statusCode': 200, 'desc': 'Available',\n 'bookingStatus': 1}\n else:\n return {'statusCode': 200, 'desc':\n 'Cannot clean. Because Room is not vacant',\n 'bookingStatus': 1}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/mark-repair/<int:floor>/<int:room>', methods=['POST'])\nclass Markrepair(Resource):\n\n def post(self, floor, room):\n try:\n if any(x['floor'] == floor and x['room'] == room and x['status'\n ] == 3 for x in roomUpdates):\n indexes = [i for i, x in enumerate(roomUpdates) if x[\n 'floor'] == floor and x['room'] == room]\n del roomUpdates[indexes[0]]\n roomUpdates.append({'floor': int(floor), 'room': int(room),\n 'status': 4})\n return {'statusCode': 200, 'desc': 'Taken for Repair',\n 'bookingStatus': 1}\n else:\n return {'statusCode': 200, 'desc':\n 'Cannot take for repair. Because Room is not vacant',\n 'bookingStatus': 1}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/completed-repair/<int:floor>/<int:room>', methods=['POST'])\nclass Completedrepair(Resource):\n\n def post(self, floor, room):\n try:\n if any(x['floor'] == floor and x['room'] == room and x['status'\n ] == 4 for x in roomUpdates):\n indexes = [i for i, x in enumerate(roomUpdates) if x[\n 'floor'] == floor and x['room'] == room]\n del roomUpdates[indexes[0]]\n roomUpdates.append({'floor': int(floor), 'room': int(room),\n 'status': 3})\n return {'statusCode': 200, 'desc': 'Repair Completed',\n 'bookingStatus': 1}\n else:\n return {'statusCode': 200, 'desc':\n 'Cannot mark repair completed. Because Room is not taken for repair'\n , 'bookingStatus': 1}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/available/', methods=['GET'])\nclass RoomsAvailable(Resource):\n\n def get(self):\n try:\n availabile = []\n for idxFloor, floor in enumerate(roomInfo):\n for idxRoom, room in enumerate(floor['rooms']):\n if any(int(x['floor']) == floor['floor'] and int(x[\n 'room']) == room['id'] for x in roomUpdates) == False:\n availabile.append(str(floor['floor']) + '-' + str(\n room['id']))\n if idxFloor == len(roomInfo) - 1 and idxRoom == len(floor\n ['rooms']) - 1:\n response = dict()\n response['availabile'] = availabile\n return {'statusCode': 200, 'desc':\n 'Available Rooms', 'response': response}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n\n\[email protected]('/cleaned/<int:floor>/<int:room>', methods=['POST'])\nclass Cleaned(Resource):\n <function token>\n\n\[email protected]('/mark-repair/<int:floor>/<int:room>', methods=['POST'])\nclass Markrepair(Resource):\n\n def post(self, floor, room):\n try:\n if any(x['floor'] == floor and x['room'] == room and x['status'\n ] == 3 for x in roomUpdates):\n indexes = [i for i, x in enumerate(roomUpdates) if x[\n 'floor'] == floor and x['room'] == room]\n del roomUpdates[indexes[0]]\n roomUpdates.append({'floor': int(floor), 'room': int(room),\n 'status': 4})\n return {'statusCode': 200, 'desc': 'Taken for Repair',\n 'bookingStatus': 1}\n else:\n return {'statusCode': 200, 'desc':\n 'Cannot take for repair. Because Room is not vacant',\n 'bookingStatus': 1}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/completed-repair/<int:floor>/<int:room>', methods=['POST'])\nclass Completedrepair(Resource):\n\n def post(self, floor, room):\n try:\n if any(x['floor'] == floor and x['room'] == room and x['status'\n ] == 4 for x in roomUpdates):\n indexes = [i for i, x in enumerate(roomUpdates) if x[\n 'floor'] == floor and x['room'] == room]\n del roomUpdates[indexes[0]]\n roomUpdates.append({'floor': int(floor), 'room': int(room),\n 'status': 3})\n return {'statusCode': 200, 'desc': 'Repair Completed',\n 'bookingStatus': 1}\n else:\n return {'statusCode': 200, 'desc':\n 'Cannot mark repair completed. Because Room is not taken for repair'\n , 'bookingStatus': 1}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/available/', methods=['GET'])\nclass RoomsAvailable(Resource):\n\n def get(self):\n try:\n availabile = []\n for idxFloor, floor in enumerate(roomInfo):\n for idxRoom, room in enumerate(floor['rooms']):\n if any(int(x['floor']) == floor['floor'] and int(x[\n 'room']) == room['id'] for x in roomUpdates) == False:\n availabile.append(str(floor['floor']) + '-' + str(\n room['id']))\n if idxFloor == len(roomInfo) - 1 and idxRoom == len(floor\n ['rooms']) - 1:\n response = dict()\n response['availabile'] = availabile\n return {'statusCode': 200, 'desc':\n 'Available Rooms', 'response': response}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\[email protected]('/mark-repair/<int:floor>/<int:room>', methods=['POST'])\nclass Markrepair(Resource):\n\n def post(self, floor, room):\n try:\n if any(x['floor'] == floor and x['room'] == room and x['status'\n ] == 3 for x in roomUpdates):\n indexes = [i for i, x in enumerate(roomUpdates) if x[\n 'floor'] == floor and x['room'] == room]\n del roomUpdates[indexes[0]]\n roomUpdates.append({'floor': int(floor), 'room': int(room),\n 'status': 4})\n return {'statusCode': 200, 'desc': 'Taken for Repair',\n 'bookingStatus': 1}\n else:\n return {'statusCode': 200, 'desc':\n 'Cannot take for repair. Because Room is not vacant',\n 'bookingStatus': 1}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/completed-repair/<int:floor>/<int:room>', methods=['POST'])\nclass Completedrepair(Resource):\n\n def post(self, floor, room):\n try:\n if any(x['floor'] == floor and x['room'] == room and x['status'\n ] == 4 for x in roomUpdates):\n indexes = [i for i, x in enumerate(roomUpdates) if x[\n 'floor'] == floor and x['room'] == room]\n del roomUpdates[indexes[0]]\n roomUpdates.append({'floor': int(floor), 'room': int(room),\n 'status': 3})\n return {'statusCode': 200, 'desc': 'Repair Completed',\n 'bookingStatus': 1}\n else:\n return {'statusCode': 200, 'desc':\n 'Cannot mark repair completed. Because Room is not taken for repair'\n , 'bookingStatus': 1}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/available/', methods=['GET'])\nclass RoomsAvailable(Resource):\n\n def get(self):\n try:\n availabile = []\n for idxFloor, floor in enumerate(roomInfo):\n for idxRoom, room in enumerate(floor['rooms']):\n if any(int(x['floor']) == floor['floor'] and int(x[\n 'room']) == room['id'] for x in roomUpdates) == False:\n availabile.append(str(floor['floor']) + '-' + str(\n room['id']))\n if idxFloor == len(roomInfo) - 1 and idxRoom == len(floor\n ['rooms']) - 1:\n response = dict()\n response['availabile'] = availabile\n return {'statusCode': 200, 'desc':\n 'Available Rooms', 'response': response}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\[email protected]('/mark-repair/<int:floor>/<int:room>', methods=['POST'])\nclass Markrepair(Resource):\n <function token>\n\n\[email protected]('/completed-repair/<int:floor>/<int:room>', methods=['POST'])\nclass Completedrepair(Resource):\n\n def post(self, floor, room):\n try:\n if any(x['floor'] == floor and x['room'] == room and x['status'\n ] == 4 for x in roomUpdates):\n indexes = [i for i, x in enumerate(roomUpdates) if x[\n 'floor'] == floor and x['room'] == room]\n del roomUpdates[indexes[0]]\n roomUpdates.append({'floor': int(floor), 'room': int(room),\n 'status': 3})\n return {'statusCode': 200, 'desc': 'Repair Completed',\n 'bookingStatus': 1}\n else:\n return {'statusCode': 200, 'desc':\n 'Cannot mark repair completed. Because Room is not taken for repair'\n , 'bookingStatus': 1}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/available/', methods=['GET'])\nclass RoomsAvailable(Resource):\n\n def get(self):\n try:\n availabile = []\n for idxFloor, floor in enumerate(roomInfo):\n for idxRoom, room in enumerate(floor['rooms']):\n if any(int(x['floor']) == floor['floor'] and int(x[\n 'room']) == room['id'] for x in roomUpdates) == False:\n availabile.append(str(floor['floor']) + '-' + str(\n room['id']))\n if idxFloor == len(roomInfo) - 1 and idxRoom == len(floor\n ['rooms']) - 1:\n response = dict()\n response['availabile'] = availabile\n return {'statusCode': 200, 'desc':\n 'Available Rooms', 'response': response}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\[email protected]('/completed-repair/<int:floor>/<int:room>', methods=['POST'])\nclass Completedrepair(Resource):\n\n def post(self, floor, room):\n try:\n if any(x['floor'] == floor and x['room'] == room and x['status'\n ] == 4 for x in roomUpdates):\n indexes = [i for i, x in enumerate(roomUpdates) if x[\n 'floor'] == floor and x['room'] == room]\n del roomUpdates[indexes[0]]\n roomUpdates.append({'floor': int(floor), 'room': int(room),\n 'status': 3})\n return {'statusCode': 200, 'desc': 'Repair Completed',\n 'bookingStatus': 1}\n else:\n return {'statusCode': 200, 'desc':\n 'Cannot mark repair completed. Because Room is not taken for repair'\n , 'bookingStatus': 1}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n\n\[email protected]('/available/', methods=['GET'])\nclass RoomsAvailable(Resource):\n\n def get(self):\n try:\n availabile = []\n for idxFloor, floor in enumerate(roomInfo):\n for idxRoom, room in enumerate(floor['rooms']):\n if any(int(x['floor']) == floor['floor'] and int(x[\n 'room']) == room['id'] for x in roomUpdates) == False:\n availabile.append(str(floor['floor']) + '-' + str(\n room['id']))\n if idxFloor == len(roomInfo) - 1 and idxRoom == len(floor\n ['rooms']) - 1:\n response = dict()\n response['availabile'] = availabile\n return {'statusCode': 200, 'desc':\n 'Available Rooms', 'response': response}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\[email protected]('/completed-repair/<int:floor>/<int:room>', methods=['POST'])\nclass Completedrepair(Resource):\n <function token>\n\n\[email protected]('/available/', methods=['GET'])\nclass RoomsAvailable(Resource):\n\n def get(self):\n try:\n availabile = []\n for idxFloor, floor in enumerate(roomInfo):\n for idxRoom, room in enumerate(floor['rooms']):\n if any(int(x['floor']) == floor['floor'] and int(x[\n 'room']) == room['id'] for x in roomUpdates) == False:\n availabile.append(str(floor['floor']) + '-' + str(\n room['id']))\n if idxFloor == len(roomInfo) - 1 and idxRoom == len(floor\n ['rooms']) - 1:\n response = dict()\n response['availabile'] = availabile\n return {'statusCode': 200, 'desc':\n 'Available Rooms', 'response': response}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\[email protected]('/available/', methods=['GET'])\nclass RoomsAvailable(Resource):\n\n def get(self):\n try:\n availabile = []\n for idxFloor, floor in enumerate(roomInfo):\n for idxRoom, room in enumerate(floor['rooms']):\n if any(int(x['floor']) == floor['floor'] and int(x[\n 'room']) == room['id'] for x in roomUpdates) == False:\n availabile.append(str(floor['floor']) + '-' + str(\n room['id']))\n if idxFloor == len(roomInfo) - 1 and idxRoom == len(floor\n ['rooms']) - 1:\n response = dict()\n response['availabile'] = availabile\n return {'statusCode': 200, 'desc':\n 'Available Rooms', 'response': response}\n except ValueError:\n return {'statusCode': 500, 'desc':\n 'Error in Checkin. Pleaes try again after some time...',\n 'bookingStatus': -1}\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\[email protected]('/available/', methods=['GET'])\nclass RoomsAvailable(Resource):\n <function token>\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n"
] | false |
98,608 |
bba17f68e48c83a626d408ef201b80cef12de1ab
|
# -*- coding: utf-8 -*-
__author__ = 'Wenzhi Mao'
__version__ = '0.0.1'
__release__ = [int(x) for x in __version__.split('.')]
try:
    del x  # the comprehension variable only leaks into module scope under Python 2
except NameError:
    pass
__all__ = []
from platform import system
from sys import version_info
_system = system()
del system
_PY3K = version_info[0] > 2
_PY2K = not _PY3K
del version_info
|
[
"# -*- coding: utf-8 -*-\n__author__ = 'Wenzhi Mao'\n__version__ = '0.0.1'\n\n__release__ = [int(x) for x in __version__.split('.')]\ndel x\n__all__ = []\n\nfrom platform import system\nfrom sys import version_info\n\n_system = system()\ndel system\n\n_PY3K = version_info[0] > 2\n_PY2K = not _PY3K\ndel version_info\n",
"__author__ = 'Wenzhi Mao'\n__version__ = '0.0.1'\n__release__ = [int(x) for x in __version__.split('.')]\ndel x\n__all__ = []\nfrom platform import system\nfrom sys import version_info\n_system = system()\ndel system\n_PY3K = version_info[0] > 2\n_PY2K = not _PY3K\ndel version_info\n",
"__author__ = 'Wenzhi Mao'\n__version__ = '0.0.1'\n__release__ = [int(x) for x in __version__.split('.')]\ndel x\n__all__ = []\n<import token>\n_system = system()\ndel system\n_PY3K = version_info[0] > 2\n_PY2K = not _PY3K\ndel version_info\n",
"<assignment token>\ndel x\n<assignment token>\n<import token>\n<assignment token>\ndel system\n<assignment token>\ndel version_info\n",
"<assignment token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,609 |
60072c72d3e2a25467e8f9c49589bf6b94fb28d0
|
## UNIT TESTS
import os
import tempfile
import pytest, flask
import datetime as date
from flaskr import flaskr, request, jsonify
from dateutil.relativedelta import relativedelta
app = flask.Flask(__name__)
@pytest.fixture # called by each individual test - simple interface for app to trigger test reqs
def client():
db_fd, flaskr.app.config['DATABASE'] = tempfile.mkstemp() # return low-level file handle and random file name - latter for db name
flaskr.app.config['TESTING'] = True # disable error catching during request handling for cleaner error reports when testing app
with flaskr.app.test_client() as client:
with flaskr.app.app_context():
flaskr.init_db()
yield client
os.close(db_fd)
os.unlink(flaskr.app.config['DATABASE'])
def test_empty_db(client): # "test" prefix indicates pytest module to run this as a test
""" Start with blank db """
rv = client.get('/')
assert b'No team members here yet' in rv.data
@app.route('/api/team/<string:name>')
def test_correct_age_input(client): # test age inputted by post request of user by checking years difference between birthday and today
rv = client.get('/')
json_data = request.get_json()
name = json_data['name']
birthday = json_data['birthday']
age = json_data['age']
    date_object = date.datetime.strptime(birthday, "%d-%m-%y")
    difference_in_years = relativedelta(date.datetime.today(), date_object).years
if (difference_in_years != age):
assert b'Invalid age inputted' in rv.data
|
[
"## UNIT TESTS\n\nimport os\nimport tempfile\n\nimport pytest, flask\nimport datetime as date\n\nfrom flaskr import flaskr, request, jsonify\nfrom dateutil.relativedelta import relativedelta\n\n\n\napp = flask.Flask(_name_)\n\[email protected] # called by each individual test - simple interface for app to trigger test reqs\ndef client():\n db_fd, flaskr.app.config['DATABASE'] = tempfile.mkstemp() # return low-level file handle and random file name - latter for db name\n flaskr.app.config['TESTING'] = True # disable error catching during request handling for cleaner error reports when testing app\n\n with flaskr.app.test_client() as client:\n with flaskr.app.app_context():\n flaskr.init_db()\n yield client\n\n os.close(db_fd)\n os.unlink(flaskr.app.config['DATABASE'])\n\ndef test_empty_db(client): # \"test\" prefix indicates pytest module to run this as a test\n \"\"\" Start with blank db \"\"\"\n\n rv = client.get('/')\n assert b'No team members here yet' in rv.data\n\[email protected]('/api/team/<string:name>')\ndef test_correct_age_input(): # test age inputted by post request of user by checking years difference between birthday and today\n rv = client.get('/')\n \n json_data = request.get_json()\n name = json_data['name']\n birthday = json_data['birthday']\n age = json_data['age']\n\n date_object = datetime.strptime(birthday, \"%d-%m-%y\")\n\n difference_in_years = relativedelta(date.today(), start_date).years\n\n if (difference_in_years != age):\n assert b'Invalid age inputted' in rv.data",
"import os\nimport tempfile\nimport pytest, flask\nimport datetime as date\nfrom flaskr import flaskr, request, jsonify\nfrom dateutil.relativedelta import relativedelta\napp = flask.Flask(_name_)\n\n\[email protected]\ndef client():\n db_fd, flaskr.app.config['DATABASE'] = tempfile.mkstemp()\n flaskr.app.config['TESTING'] = True\n with flaskr.app.test_client() as client:\n with flaskr.app.app_context():\n flaskr.init_db()\n yield client\n os.close(db_fd)\n os.unlink(flaskr.app.config['DATABASE'])\n\n\ndef test_empty_db(client):\n \"\"\" Start with blank db \"\"\"\n rv = client.get('/')\n assert b'No team members here yet' in rv.data\n\n\[email protected]('/api/team/<string:name>')\ndef test_correct_age_input():\n rv = client.get('/')\n json_data = request.get_json()\n name = json_data['name']\n birthday = json_data['birthday']\n age = json_data['age']\n date_object = datetime.strptime(birthday, '%d-%m-%y')\n difference_in_years = relativedelta(date.today(), start_date).years\n if difference_in_years != age:\n assert b'Invalid age inputted' in rv.data\n",
"<import token>\napp = flask.Flask(_name_)\n\n\[email protected]\ndef client():\n db_fd, flaskr.app.config['DATABASE'] = tempfile.mkstemp()\n flaskr.app.config['TESTING'] = True\n with flaskr.app.test_client() as client:\n with flaskr.app.app_context():\n flaskr.init_db()\n yield client\n os.close(db_fd)\n os.unlink(flaskr.app.config['DATABASE'])\n\n\ndef test_empty_db(client):\n \"\"\" Start with blank db \"\"\"\n rv = client.get('/')\n assert b'No team members here yet' in rv.data\n\n\[email protected]('/api/team/<string:name>')\ndef test_correct_age_input():\n rv = client.get('/')\n json_data = request.get_json()\n name = json_data['name']\n birthday = json_data['birthday']\n age = json_data['age']\n date_object = datetime.strptime(birthday, '%d-%m-%y')\n difference_in_years = relativedelta(date.today(), start_date).years\n if difference_in_years != age:\n assert b'Invalid age inputted' in rv.data\n",
"<import token>\n<assignment token>\n\n\[email protected]\ndef client():\n db_fd, flaskr.app.config['DATABASE'] = tempfile.mkstemp()\n flaskr.app.config['TESTING'] = True\n with flaskr.app.test_client() as client:\n with flaskr.app.app_context():\n flaskr.init_db()\n yield client\n os.close(db_fd)\n os.unlink(flaskr.app.config['DATABASE'])\n\n\ndef test_empty_db(client):\n \"\"\" Start with blank db \"\"\"\n rv = client.get('/')\n assert b'No team members here yet' in rv.data\n\n\[email protected]('/api/team/<string:name>')\ndef test_correct_age_input():\n rv = client.get('/')\n json_data = request.get_json()\n name = json_data['name']\n birthday = json_data['birthday']\n age = json_data['age']\n date_object = datetime.strptime(birthday, '%d-%m-%y')\n difference_in_years = relativedelta(date.today(), start_date).years\n if difference_in_years != age:\n assert b'Invalid age inputted' in rv.data\n",
"<import token>\n<assignment token>\n\n\[email protected]\ndef client():\n db_fd, flaskr.app.config['DATABASE'] = tempfile.mkstemp()\n flaskr.app.config['TESTING'] = True\n with flaskr.app.test_client() as client:\n with flaskr.app.app_context():\n flaskr.init_db()\n yield client\n os.close(db_fd)\n os.unlink(flaskr.app.config['DATABASE'])\n\n\ndef test_empty_db(client):\n \"\"\" Start with blank db \"\"\"\n rv = client.get('/')\n assert b'No team members here yet' in rv.data\n\n\n<function token>\n",
"<import token>\n<assignment token>\n<function token>\n\n\ndef test_empty_db(client):\n \"\"\" Start with blank db \"\"\"\n rv = client.get('/')\n assert b'No team members here yet' in rv.data\n\n\n<function token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n"
] | false |
98,610 |
9608ec4a8b56f597e60d15d4d99b00e33e642ba6
|
def check(v, regex):
if not regex.match(v):
raise ValueError
return v
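
# Illustrative use only (not part of the original snippet): check() expects a
# precompiled pattern and simply echoes the value back when it matches.
if __name__ == "__main__":
    import re
    USERNAME_RE = re.compile(r"^[a-z0-9_]{3,16}$")   # hypothetical pattern
    print(check("alice_01", USERNAME_RE))            # prints "alice_01"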
|
[
"def check(v, regex):\n if not regex.match(v):\n raise ValueError\n return v\n",
"<function token>\n"
] | false |
98,611 |
41adceb63f25afce537b4019f5a2383debf2a8fb
|
#!/usr/bin/python
# coding:utf-8
from builder import Builder
class HTMLBuilder( Builder ):
strBuffer = []
def makeTitle( self, str ):
self.strBuffer.append(str + ".html\n")
self.strBuffer.append("<html><head><title>" +str+ "</title></head><body>")
def makeString( self, str ):
self.strBuffer.append("<p>" +str+ "</p>")
def makeItems( self, str ):
for i in str:
self.strBuffer.append("<li>"+i+ "</li>" )
def close( self ):
self.strBuffer.append("</body></html>")
def getResult( self ):
return self.strBuffer
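
# A minimal usage sketch, added for illustration (the original exercise presumably
# drives the builder from a separate Director/Main elsewhere). Note that strBuffer
# is a class attribute, so output accumulates across HTMLBuilder instances.
if __name__ == "__main__":
    b = HTMLBuilder()
    b.makeTitle("Greeting")
    b.makeString("Hello, world")
    b.makeItems(["good morning", "good night"])
    b.close()
    print("".join(b.getResult()))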
|
[
"#!/usr/bin/python\n# coding:utf-8\n\nfrom builder import Builder\n\nclass HTMLBuilder( Builder ):\n strBuffer = []\n def makeTitle( self, str ):\n self.strBuffer.append(str + \".html\\n\")\n self.strBuffer.append(\"<html><head><title>\" +str+ \"</title></head><body>\")\n def makeString( self, str ):\n self.strBuffer.append(\"<p>\" +str+ \"</p>\")\n def makeItems( self, str ):\n for i in str:\n self.strBuffer.append(\"<li>\"+i+ \"</li>\" )\n def close( self ):\n self.strBuffer.append(\"</body></html>\")\n def getResult( self ):\n return self.strBuffer\n\n",
"from builder import Builder\n\n\nclass HTMLBuilder(Builder):\n strBuffer = []\n\n def makeTitle(self, str):\n self.strBuffer.append(str + '.html\\n')\n self.strBuffer.append('<html><head><title>' + str +\n '</title></head><body>')\n\n def makeString(self, str):\n self.strBuffer.append('<p>' + str + '</p>')\n\n def makeItems(self, str):\n for i in str:\n self.strBuffer.append('<li>' + i + '</li>')\n\n def close(self):\n self.strBuffer.append('</body></html>')\n\n def getResult(self):\n return self.strBuffer\n",
"<import token>\n\n\nclass HTMLBuilder(Builder):\n strBuffer = []\n\n def makeTitle(self, str):\n self.strBuffer.append(str + '.html\\n')\n self.strBuffer.append('<html><head><title>' + str +\n '</title></head><body>')\n\n def makeString(self, str):\n self.strBuffer.append('<p>' + str + '</p>')\n\n def makeItems(self, str):\n for i in str:\n self.strBuffer.append('<li>' + i + '</li>')\n\n def close(self):\n self.strBuffer.append('</body></html>')\n\n def getResult(self):\n return self.strBuffer\n",
"<import token>\n\n\nclass HTMLBuilder(Builder):\n <assignment token>\n\n def makeTitle(self, str):\n self.strBuffer.append(str + '.html\\n')\n self.strBuffer.append('<html><head><title>' + str +\n '</title></head><body>')\n\n def makeString(self, str):\n self.strBuffer.append('<p>' + str + '</p>')\n\n def makeItems(self, str):\n for i in str:\n self.strBuffer.append('<li>' + i + '</li>')\n\n def close(self):\n self.strBuffer.append('</body></html>')\n\n def getResult(self):\n return self.strBuffer\n",
"<import token>\n\n\nclass HTMLBuilder(Builder):\n <assignment token>\n\n def makeTitle(self, str):\n self.strBuffer.append(str + '.html\\n')\n self.strBuffer.append('<html><head><title>' + str +\n '</title></head><body>')\n\n def makeString(self, str):\n self.strBuffer.append('<p>' + str + '</p>')\n\n def makeItems(self, str):\n for i in str:\n self.strBuffer.append('<li>' + i + '</li>')\n\n def close(self):\n self.strBuffer.append('</body></html>')\n <function token>\n",
"<import token>\n\n\nclass HTMLBuilder(Builder):\n <assignment token>\n <function token>\n\n def makeString(self, str):\n self.strBuffer.append('<p>' + str + '</p>')\n\n def makeItems(self, str):\n for i in str:\n self.strBuffer.append('<li>' + i + '</li>')\n\n def close(self):\n self.strBuffer.append('</body></html>')\n <function token>\n",
"<import token>\n\n\nclass HTMLBuilder(Builder):\n <assignment token>\n <function token>\n\n def makeString(self, str):\n self.strBuffer.append('<p>' + str + '</p>')\n\n def makeItems(self, str):\n for i in str:\n self.strBuffer.append('<li>' + i + '</li>')\n <function token>\n <function token>\n",
"<import token>\n\n\nclass HTMLBuilder(Builder):\n <assignment token>\n <function token>\n <function token>\n\n def makeItems(self, str):\n for i in str:\n self.strBuffer.append('<li>' + i + '</li>')\n <function token>\n <function token>\n",
"<import token>\n\n\nclass HTMLBuilder(Builder):\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
98,612 |
c9744f1eb8e9bae2d511426063daef51551391a1
|
from picamera import PiCamera
camera = PiCamera()
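# Optional warm-up (a suggestion, not in the original script): the sensor needs a
# moment to settle exposure/white balance before the capture looks right, e.g.
#   camera.start_preview(); time.sleep(2)   # requires "import time"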
camera.capture('/home/pi/Desktop/img.jpg')
|
[
"from picamera import PiCamera\ncamera = PiCamera()\ncamera.capture('/home/pi/Desktop/img.jpg')",
"from picamera import PiCamera\ncamera = PiCamera()\ncamera.capture('/home/pi/Desktop/img.jpg')\n",
"<import token>\ncamera = PiCamera()\ncamera.capture('/home/pi/Desktop/img.jpg')\n",
"<import token>\n<assignment token>\ncamera.capture('/home/pi/Desktop/img.jpg')\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
98,613 |
cc4c07963e1df3ee3d431c0127f8492f46529f38
|
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import math
def grafici(stato):
intestazione="Stato " + stato
print(intestazione)
if stato=="solido":
titolo1="sol"
if stato=="liquido":
titolo1="liq"
if stato=="gassoso":
titolo1="gas"
nome_file_1="output.epot."+titolo1+".0"
bl,energia,err = np.loadtxt(nome_file_1, usecols=(0,2,3), delimiter=' ', unpack='true')
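    # Unit note (inferred from the constants used below): energies come in reduced
    # Lennard-Jones units and are converted to SI assuming argon parameters,
    # epsilon/k_B = 120 K with k_B = 1.380649e-23 J/K.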
energia=energia*120.*1.380649*(10**(-23)) #conversione in SI
err=err*120.*1.380649*(10**(-23))
plt.errorbar(bl,energia,yerr=err)
plt.title("Andamento energia potenziale per particella")
plt.xlabel("numero blocchi")
plt.ylabel("U/N [J]")
plt.grid(True)
plt.show()
nome_file_2="output.pres."+titolo1+".0"
bl,pres,err = np.loadtxt(nome_file_2, usecols=(0,2,3), delimiter=' ', unpack='true')
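    # Same assumption for the pressure: reduced units -> Pa via epsilon/sigma^3,
    # with sigma = 0.34 nm, which is where the (0.34e-9)**3 factor comes from.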
pres=pres*120.*1.380649*(10**(-23))/((0.34*(10**(-9)))**3)
err=err*120.*1.380649*(10**(-23))/((0.34*(10**(-9)))**3)
plt.errorbar(bl,pres,yerr=err)
plt.title("Andamento pressione")
plt.xlabel("numero blocchi")
plt.ylabel("P[Pa]")
plt.grid(True)
plt.show()
grafici("solido")
grafici("liquido")
grafici("gassoso")
|
[
"import matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\n\n\ndef grafici(stato):\n\tintestazione=\"Stato \" + stato\n\tprint(intestazione)\n\t\n\tif stato==\"solido\":\n\t\ttitolo1=\"sol\"\n\tif stato==\"liquido\":\n\t\ttitolo1=\"liq\"\n\tif stato==\"gassoso\":\n\t\ttitolo1=\"gas\"\n\t\t\n\tnome_file_1=\"output.epot.\"+titolo1+\".0\"\n\t\n\tbl,energia,err = np.loadtxt(nome_file_1, usecols=(0,2,3), delimiter=' ', unpack='true')\n\t\n\tenergia=energia*120.*1.380649*(10**(-23)) #conversione in SI\n\terr=err*120.*1.380649*(10**(-23))\n\t\n\tplt.errorbar(bl,energia,yerr=err)\n\tplt.title(\"Andamento energia potenziale per particella\")\n\tplt.xlabel(\"numero blocchi\")\n\tplt.ylabel(\"U/N [J]\")\n\tplt.grid(True)\n\tplt.show()\n\t\n\tnome_file_2=\"output.pres.\"+titolo1+\".0\"\n\tbl,pres,err = np.loadtxt(nome_file_2, usecols=(0,2,3), delimiter=' ', unpack='true')\n\t\n\tpres=pres*120.*1.380649*(10**(-23))/((0.34*(10**(-9)))**3)\n\terr=err*120.*1.380649*(10**(-23))/((0.34*(10**(-9)))**3)\n\t\n\tplt.errorbar(bl,pres,yerr=err)\t\n\tplt.title(\"Andamento pressione\")\n\tplt.xlabel(\"numero blocchi\")\n\tplt.ylabel(\"P[Pa]\")\n\tplt.grid(True)\n\tplt.show()\t\n\t\ngrafici(\"solido\")\ngrafici(\"liquido\")\ngrafici(\"gassoso\")\n\t\n\t\n",
"import matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\n\n\ndef grafici(stato):\n intestazione = 'Stato ' + stato\n print(intestazione)\n if stato == 'solido':\n titolo1 = 'sol'\n if stato == 'liquido':\n titolo1 = 'liq'\n if stato == 'gassoso':\n titolo1 = 'gas'\n nome_file_1 = 'output.epot.' + titolo1 + '.0'\n bl, energia, err = np.loadtxt(nome_file_1, usecols=(0, 2, 3), delimiter\n =' ', unpack='true')\n energia = energia * 120.0 * 1.380649 * 10 ** -23\n err = err * 120.0 * 1.380649 * 10 ** -23\n plt.errorbar(bl, energia, yerr=err)\n plt.title('Andamento energia potenziale per particella')\n plt.xlabel('numero blocchi')\n plt.ylabel('U/N [J]')\n plt.grid(True)\n plt.show()\n nome_file_2 = 'output.pres.' + titolo1 + '.0'\n bl, pres, err = np.loadtxt(nome_file_2, usecols=(0, 2, 3), delimiter=\n ' ', unpack='true')\n pres = pres * 120.0 * 1.380649 * 10 ** -23 / (0.34 * 10 ** -9) ** 3\n err = err * 120.0 * 1.380649 * 10 ** -23 / (0.34 * 10 ** -9) ** 3\n plt.errorbar(bl, pres, yerr=err)\n plt.title('Andamento pressione')\n plt.xlabel('numero blocchi')\n plt.ylabel('P[Pa]')\n plt.grid(True)\n plt.show()\n\n\ngrafici('solido')\ngrafici('liquido')\ngrafici('gassoso')\n",
"<import token>\n\n\ndef grafici(stato):\n intestazione = 'Stato ' + stato\n print(intestazione)\n if stato == 'solido':\n titolo1 = 'sol'\n if stato == 'liquido':\n titolo1 = 'liq'\n if stato == 'gassoso':\n titolo1 = 'gas'\n nome_file_1 = 'output.epot.' + titolo1 + '.0'\n bl, energia, err = np.loadtxt(nome_file_1, usecols=(0, 2, 3), delimiter\n =' ', unpack='true')\n energia = energia * 120.0 * 1.380649 * 10 ** -23\n err = err * 120.0 * 1.380649 * 10 ** -23\n plt.errorbar(bl, energia, yerr=err)\n plt.title('Andamento energia potenziale per particella')\n plt.xlabel('numero blocchi')\n plt.ylabel('U/N [J]')\n plt.grid(True)\n plt.show()\n nome_file_2 = 'output.pres.' + titolo1 + '.0'\n bl, pres, err = np.loadtxt(nome_file_2, usecols=(0, 2, 3), delimiter=\n ' ', unpack='true')\n pres = pres * 120.0 * 1.380649 * 10 ** -23 / (0.34 * 10 ** -9) ** 3\n err = err * 120.0 * 1.380649 * 10 ** -23 / (0.34 * 10 ** -9) ** 3\n plt.errorbar(bl, pres, yerr=err)\n plt.title('Andamento pressione')\n plt.xlabel('numero blocchi')\n plt.ylabel('P[Pa]')\n plt.grid(True)\n plt.show()\n\n\ngrafici('solido')\ngrafici('liquido')\ngrafici('gassoso')\n",
"<import token>\n\n\ndef grafici(stato):\n intestazione = 'Stato ' + stato\n print(intestazione)\n if stato == 'solido':\n titolo1 = 'sol'\n if stato == 'liquido':\n titolo1 = 'liq'\n if stato == 'gassoso':\n titolo1 = 'gas'\n nome_file_1 = 'output.epot.' + titolo1 + '.0'\n bl, energia, err = np.loadtxt(nome_file_1, usecols=(0, 2, 3), delimiter\n =' ', unpack='true')\n energia = energia * 120.0 * 1.380649 * 10 ** -23\n err = err * 120.0 * 1.380649 * 10 ** -23\n plt.errorbar(bl, energia, yerr=err)\n plt.title('Andamento energia potenziale per particella')\n plt.xlabel('numero blocchi')\n plt.ylabel('U/N [J]')\n plt.grid(True)\n plt.show()\n nome_file_2 = 'output.pres.' + titolo1 + '.0'\n bl, pres, err = np.loadtxt(nome_file_2, usecols=(0, 2, 3), delimiter=\n ' ', unpack='true')\n pres = pres * 120.0 * 1.380649 * 10 ** -23 / (0.34 * 10 ** -9) ** 3\n err = err * 120.0 * 1.380649 * 10 ** -23 / (0.34 * 10 ** -9) ** 3\n plt.errorbar(bl, pres, yerr=err)\n plt.title('Andamento pressione')\n plt.xlabel('numero blocchi')\n plt.ylabel('P[Pa]')\n plt.grid(True)\n plt.show()\n\n\n<code token>\n",
"<import token>\n<function token>\n<code token>\n"
] | false |
98,614 |
f7bb8e18e438a9771e372153df7c188b025cbf8e
|
import secrets
import time
import random
import pandas as pd
from typing import Dict, Callable, Any
# cadCAD configuration modules
from cadCAD.configuration.utils import config_sim
from cadCAD.configuration import Experiment
# cadCAD simulation engine modules
from cadCAD.engine import ExecutionMode, ExecutionContext
from cadCAD.engine import Executor
from cadCAD import configs
del configs[:]
from .specs import (
Deposit, DepositData, BeaconState,
SECONDS_PER_SLOT, SLOTS_PER_EPOCH,
initialize_beacon_state_from_eth1,
)
from .network import (
Network,
update_network, disseminate_attestations,
disseminate_block, knowledge_set,
)
from .utils.cadCADsupSUP import (
get_observed_psubs,
get_observed_initial_conditions,
add_loop_ic,
add_loop_psubs,
)
from eth2spec.utils.ssz.ssz_impl import hash_tree_root
from eth2spec.utils.ssz.ssz_typing import Bitlist, uint64
from eth2spec.utils.hash_function import hash
from .utils.eth2 import eth_to_gwei
## Initialisation
def get_initial_deposits(validators):
"""Produce deposits
Args:
validators (Sequence[BRValidator]): Validators of the simulation
Returns:
List[Deposit]: The list of deposits
"""
return [Deposit(
data=DepositData(
amount=eth_to_gwei(32),
pubkey=v.pubkey)
) for v in validators]
def get_genesis_state(validators, seed="hello"):
block_hash = hash(seed.encode("utf-8"))
eth1_timestamp = 1578009600
return initialize_beacon_state_from_eth1(
block_hash, eth1_timestamp, get_initial_deposits(validators)
)
def skip_genesis_block(validators):
for validator in validators:
validator.forward_by(SECONDS_PER_SLOT)
## State transitions
def tick(params, step, sL, s, _input):
'''
We call tick to move the clock by one step (= a second if frequency is 1, a tenth of a second
if frequency is 10 etc). When tick moves the clock past the start of a new slot, validators
update their internals, checking for instance their new attester or proposer duties if this
tick coincides with a new epoch.
Whenever tick is called, we also check whether we want the network to update or not, by
flipping a biased coin. By "updating the network", we mean "peers exchange messages". In the
chain example above, with 4 validators arranged as 0 <-> 1 <-> 2 <-> 3, it takes two network
updates for a message from validator 3 to reach validator 0 (when validator 3 sends their
message, we assume that it reaches all their peers instantly).
The update frequency of the network is represented by the network_update_rate simulation
parameter, also in Hertz. A network_update_rate of 1 means that messages spread one step
further on the network each second.
'''
frequency = params["frequency"] # How many times per second we update the simulation.
network_update_rate = params["network_update_rate"] # How many steps do messages propagate per second
# Probably overkill
assert frequency >= network_update_rate
network = s["network"]
update_prob = float(network_update_rate) / float(frequency)
# If we draw a success, based on `update_prob`, update the network
if random.random() < update_prob:
update_network(network)
# Push validators' clocks by one step
for validator in network.validators:
validator.update_time(frequency)
if s["timestep"] % 100 == 0:
print("timestep", s["timestep"], "of run", s["run"])
return ("network", network)
def update_attestations(params, step, sL, s, _input):
# Get the attestations and disseminate them on-the-wire
network = s["network"]
disseminate_attestations(network, _input["attestations"])
return ('network', network)
def update_blocks(params, step, sL, s, _input):
# Get the blocks proposed and disseminate them on-the-wire
network = s["network"]
for block in _input["blocks"]:
disseminate_block(network, block.message.proposer_index, block)
return ('network', network)
## Policies
### Attestations
def attest_policy(params, step, sL, s):
# Pinging validators to check if anyone wants to attest
network = s['network']
produced_attestations = []
for validator_index, validator in enumerate(network.validators):
known_items = knowledge_set(network, validator_index)
attestation = validator.attest(known_items)
if attestation is not None:
produced_attestations.append([validator_index, attestation])
return ({ 'attestations': produced_attestations })
### Block proposal
def propose_policy(params, step, sL, s):
# Pinging validators to check if anyone wants to propose a block
network = s['network']
produced_blocks = []
for validator_index, validator in enumerate(network.validators):
known_items = knowledge_set(network, validator_index) # Known attestations&blocks of ValidatorIndex. (attestation info required to aggregate and put into block!)
block = validator.propose(known_items) # Check, if supposed to propose and if yes, propose!
if block is not None:
produced_blocks.append(block)
return ({ 'blocks': produced_blocks })
### Simulator shell
class SimulationParameters:
num_epochs: uint64
num_run: uint64
frequency: uint64
network_update_rate: float
def __init__(self, obj):
self.num_epochs = obj["num_epochs"]
self.num_run = obj["num_run"]
self.frequency = obj["frequency"]
self.network_update_rate = obj["network_update_rate"]
def simulate(network: Network, parameters: SimulationParameters, observers: Dict[str, Callable[[BeaconState], Any]] = {}) -> pd.DataFrame:
"""
Args:
network (Network): Network of :py:class:`beaconrunner.validatorlib.BRValidator`
parameters (BRSimulationParameters): Simulation parameters
Returns:
pandas.DataFrame: Results of the simulation contained in a pandas data frame
"""
initial_conditions = {
'network': network
}
psubs = [
{
'policies': {
'action': attest_policy # Ping all validators and check if they want to attest
},
'variables': {
'network': update_attestations # Send attestations to direct peers respectively
}
},
{
'policies': {
'action': propose_policy # Propose block if supposed to
},
'variables': {
'network': update_blocks # Send block to direct peers respectively.
}
},
{
'policies': {
},
'variables': {
'network': tick # step 5
}
},
]
# Determine how many steps the simulation is running for
num_slots = parameters.num_epochs * SLOTS_PER_EPOCH
steps = int(num_slots * SECONDS_PER_SLOT * parameters.frequency)
params = {
"frequency": [parameters.frequency],
"network_update_rate": [parameters.network_update_rate],
}
print("will simulate", parameters.num_epochs, "epochs (", num_slots, "slots ) at frequency", parameters.frequency, "moves/second")
print("total", steps, "simulation steps")
# Add our observers to the simulation
observed_ic = get_observed_initial_conditions(initial_conditions, observers)
observed_psubs = get_observed_psubs(psubs, observers)
# observed_params = add_loop_params(get_observed_params(params, observers))
sim_config = config_sim({
'T': range(steps),
'N': 1,
'M': {
'frequency': [parameters.frequency],
'network_update_rate': [parameters.network_update_rate],
}
})
from cadCAD import configs
del configs[:]
# Final simulation parameters and execution
experiment = Experiment()
experiment.append_configs(
initial_state = observed_ic,
partial_state_update_blocks = observed_psubs,
sim_configs = sim_config
)
exec_context = ExecutionContext()
simulation = Executor(exec_context=exec_context, configs=configs)
raw_result, tensor, sessions = simulation.execute()
return pd.DataFrame(raw_result)
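
# A hypothetical driver (not part of this module), sketched only to show how the
# pieces above fit together; the validator class, the Network constructor fields
# and the state-loading call are assumptions, not the library's confirmed API.
#
#   validators = [MyValidator(i) for i in range(4)]        # some BRValidator subclass
#   genesis_state = get_genesis_state(validators)
#   for v in validators:
#       v.load_state(genesis_state)                        # assumed API
#   skip_genesis_block(validators)
#   net = Network(validators=validators)                   # topology fields assumed
#   params = SimulationParameters({
#       "num_epochs": 2, "num_run": 1, "frequency": 1, "network_update_rate": 1.0,
#   })
#   df = simulate(net, params)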
|
[
"import secrets\nimport time\nimport random\nimport pandas as pd\n\nfrom typing import Dict, Callable, Any\n\n# cadCAD configuration modules\nfrom cadCAD.configuration.utils import config_sim\nfrom cadCAD.configuration import Experiment\n\n# cadCAD simulation engine modules\nfrom cadCAD.engine import ExecutionMode, ExecutionContext\nfrom cadCAD.engine import Executor\n\nfrom cadCAD import configs\ndel configs[:]\n\nfrom .specs import (\n Deposit, DepositData, BeaconState,\n SECONDS_PER_SLOT, SLOTS_PER_EPOCH,\n initialize_beacon_state_from_eth1,\n)\nfrom .network import (\n Network,\n update_network, disseminate_attestations,\n disseminate_block, knowledge_set,\n)\n\nfrom .utils.cadCADsupSUP import (\n get_observed_psubs,\n get_observed_initial_conditions,\n add_loop_ic,\n add_loop_psubs,\n)\n\nfrom eth2spec.utils.ssz.ssz_impl import hash_tree_root\nfrom eth2spec.utils.ssz.ssz_typing import Bitlist, uint64\nfrom eth2spec.utils.hash_function import hash\nfrom .utils.eth2 import eth_to_gwei\n\n## Initialisation\n\ndef get_initial_deposits(validators):\n \"\"\"Produce deposits\n\n Args:\n validators (Sequence[BRValidator]): Validators of the simulation\n\n Returns:\n List[Deposit]: The list of deposits\n \"\"\"\n\n return [Deposit(\n data=DepositData(\n amount=eth_to_gwei(32),\n pubkey=v.pubkey)\n ) for v in validators]\n\ndef get_genesis_state(validators, seed=\"hello\"):\n block_hash = hash(seed.encode(\"utf-8\"))\n eth1_timestamp = 1578009600\n return initialize_beacon_state_from_eth1(\n block_hash, eth1_timestamp, get_initial_deposits(validators)\n )\n\ndef skip_genesis_block(validators):\n for validator in validators:\n validator.forward_by(SECONDS_PER_SLOT)\n\n## State transitions\n\ndef tick(params, step, sL, s, _input):\n '''\n We call tick to move the clock by one step (= a second if frequency is 1, a tenth of a second \n if frequency is 10 etc). When tick moves the clock past the start of a new slot, validators \n update their internals, checking for instance their new attester or proposer duties if this \n tick coincides with a new epoch.\n\n Whenever tick is called, we also check whether we want the network to update or not, by \n flipping a biased coin. By \"updating the network\", we mean \"peers exchange messages\". In the \n chain example above, with 4 validators arranged as 0 <-> 1 <-> 2 <-> 3, it takes two network \n updates for a message from validator 3 to reach validator 0 (when validator 3 sends their \n message, we assume that it reaches all their peers instantly).\n\n The update frequency of the network is represented by the network_update_rate simulation \n parameter, also in Hertz. 
A network_update_rate of 1 means that messages spread one step \n further on the network each second.\n '''\n\n frequency = params[\"frequency\"] # How many times per second we update the simulation.\n network_update_rate = params[\"network_update_rate\"] # How many steps do messages propagate per second\n\n # Probably overkill\n assert frequency >= network_update_rate\n\n network = s[\"network\"]\n\n update_prob = float(network_update_rate) / float(frequency)\n\n # If we draw a success, based on `update_prob`, update the network\n if random.random() < update_prob:\n update_network(network)\n\n # Push validators' clocks by one step\n for validator in network.validators:\n validator.update_time(frequency)\n\n if s[\"timestep\"] % 100 == 0:\n print(\"timestep\", s[\"timestep\"], \"of run\", s[\"run\"])\n\n return (\"network\", network)\n\ndef update_attestations(params, step, sL, s, _input):\n # Get the attestations and disseminate them on-the-wire\n network = s[\"network\"]\n disseminate_attestations(network, _input[\"attestations\"])\n\n return ('network', network)\n\ndef update_blocks(params, step, sL, s, _input):\n # Get the blocks proposed and disseminate them on-the-wire\n\n network = s[\"network\"]\n for block in _input[\"blocks\"]:\n disseminate_block(network, block.message.proposer_index, block)\n\n return ('network', network)\n\n## Policies\n\n### Attestations\n\ndef attest_policy(params, step, sL, s):\n # Pinging validators to check if anyone wants to attest\n\n network = s['network']\n produced_attestations = []\n\n for validator_index, validator in enumerate(network.validators):\n known_items = knowledge_set(network, validator_index)\n attestation = validator.attest(known_items)\n if attestation is not None:\n produced_attestations.append([validator_index, attestation])\n\n return ({ 'attestations': produced_attestations })\n\n### Block proposal\n\ndef propose_policy(params, step, sL, s):\n # Pinging validators to check if anyone wants to propose a block\n\n network = s['network']\n produced_blocks = []\n\n for validator_index, validator in enumerate(network.validators):\n known_items = knowledge_set(network, validator_index) # Known attestations&blocks of ValidatorIndex. 
(attestation info required to aggregate and put into block!)\n block = validator.propose(known_items) # Check, if supposed to propose and if yes, propose!\n if block is not None:\n produced_blocks.append(block)\n\n return ({ 'blocks': produced_blocks })\n\n### Simulator shell\n\nclass SimulationParameters:\n\n num_epochs: uint64\n num_run: uint64\n frequency: uint64\n network_update_rate: float\n\n def __init__(self, obj):\n self.num_epochs = obj[\"num_epochs\"]\n self.num_run = obj[\"num_run\"]\n self.frequency = obj[\"frequency\"]\n self.network_update_rate = obj[\"network_update_rate\"]\n\ndef simulate(network: Network, parameters: SimulationParameters, observers: Dict[str, Callable[[BeaconState], Any]] = {}) -> pd.DataFrame:\n \"\"\"\n Args:\n network (Network): Network of :py:class:`beaconrunner.validatorlib.BRValidator`\n parameters (BRSimulationParameters): Simulation parameters\n\n Returns:\n pandas.DataFrame: Results of the simulation contained in a pandas data frame\n \"\"\"\n\n initial_conditions = {\n 'network': network\n }\n\n psubs = [\n {\n 'policies': {\n 'action': attest_policy # Ping all validators and check if they want to attest\n },\n 'variables': {\n 'network': update_attestations # Send attestations to direct peers respectively\n }\n },\n {\n 'policies': {\n 'action': propose_policy # Propose block if supposed to\n },\n 'variables': {\n 'network': update_blocks # Send block to direct peers respectively.\n }\n },\n {\n 'policies': {\n },\n 'variables': {\n 'network': tick # step 5\n }\n },\n ]\n\n # Determine how many steps the simulation is running for\n num_slots = parameters.num_epochs * SLOTS_PER_EPOCH\n steps = int(num_slots * SECONDS_PER_SLOT * parameters.frequency)\n\n params = {\n \"frequency\": [parameters.frequency],\n \"network_update_rate\": [parameters.network_update_rate],\n }\n\n print(\"will simulate\", parameters.num_epochs, \"epochs (\", num_slots, \"slots ) at frequency\", parameters.frequency, \"moves/second\")\n print(\"total\", steps, \"simulation steps\")\n\n # Add our observers to the simulation\n observed_ic = get_observed_initial_conditions(initial_conditions, observers)\n observed_psubs = get_observed_psubs(psubs, observers)\n # observed_params = add_loop_params(get_observed_params(params, observers))\n\n sim_config = config_sim({\n 'T': range(steps),\n 'N': 1,\n 'M': {\n 'frequency': [parameters.frequency],\n 'network_update_rate': [parameters.network_update_rate],\n }\n })\n\n from cadCAD import configs\n del configs[:]\n\n # Final simulation parameters and execution\n experiment = Experiment()\n experiment.append_configs(\n initial_state = observed_ic,\n partial_state_update_blocks = observed_psubs,\n sim_configs = sim_config\n )\n\n exec_context = ExecutionContext()\n simulation = Executor(exec_context=exec_context, configs=configs)\n raw_result, tensor, sessions = simulation.execute()\n\n return pd.DataFrame(raw_result)\n",
"import secrets\nimport time\nimport random\nimport pandas as pd\nfrom typing import Dict, Callable, Any\nfrom cadCAD.configuration.utils import config_sim\nfrom cadCAD.configuration import Experiment\nfrom cadCAD.engine import ExecutionMode, ExecutionContext\nfrom cadCAD.engine import Executor\nfrom cadCAD import configs\ndel configs[:]\nfrom .specs import Deposit, DepositData, BeaconState, SECONDS_PER_SLOT, SLOTS_PER_EPOCH, initialize_beacon_state_from_eth1\nfrom .network import Network, update_network, disseminate_attestations, disseminate_block, knowledge_set\nfrom .utils.cadCADsupSUP import get_observed_psubs, get_observed_initial_conditions, add_loop_ic, add_loop_psubs\nfrom eth2spec.utils.ssz.ssz_impl import hash_tree_root\nfrom eth2spec.utils.ssz.ssz_typing import Bitlist, uint64\nfrom eth2spec.utils.hash_function import hash\nfrom .utils.eth2 import eth_to_gwei\n\n\ndef get_initial_deposits(validators):\n \"\"\"Produce deposits\n\n Args:\n validators (Sequence[BRValidator]): Validators of the simulation\n\n Returns:\n List[Deposit]: The list of deposits\n \"\"\"\n return [Deposit(data=DepositData(amount=eth_to_gwei(32), pubkey=v.\n pubkey)) for v in validators]\n\n\ndef get_genesis_state(validators, seed='hello'):\n block_hash = hash(seed.encode('utf-8'))\n eth1_timestamp = 1578009600\n return initialize_beacon_state_from_eth1(block_hash, eth1_timestamp,\n get_initial_deposits(validators))\n\n\ndef skip_genesis_block(validators):\n for validator in validators:\n validator.forward_by(SECONDS_PER_SLOT)\n\n\ndef tick(params, step, sL, s, _input):\n \"\"\"\n We call tick to move the clock by one step (= a second if frequency is 1, a tenth of a second \n if frequency is 10 etc). When tick moves the clock past the start of a new slot, validators \n update their internals, checking for instance their new attester or proposer duties if this \n tick coincides with a new epoch.\n\n Whenever tick is called, we also check whether we want the network to update or not, by \n flipping a biased coin. By \"updating the network\", we mean \"peers exchange messages\". In the \n chain example above, with 4 validators arranged as 0 <-> 1 <-> 2 <-> 3, it takes two network \n updates for a message from validator 3 to reach validator 0 (when validator 3 sends their \n message, we assume that it reaches all their peers instantly).\n\n The update frequency of the network is represented by the network_update_rate simulation \n parameter, also in Hertz. 
A network_update_rate of 1 means that messages spread one step \n further on the network each second.\n \"\"\"\n frequency = params['frequency']\n network_update_rate = params['network_update_rate']\n assert frequency >= network_update_rate\n network = s['network']\n update_prob = float(network_update_rate) / float(frequency)\n if random.random() < update_prob:\n update_network(network)\n for validator in network.validators:\n validator.update_time(frequency)\n if s['timestep'] % 100 == 0:\n print('timestep', s['timestep'], 'of run', s['run'])\n return 'network', network\n\n\ndef update_attestations(params, step, sL, s, _input):\n network = s['network']\n disseminate_attestations(network, _input['attestations'])\n return 'network', network\n\n\ndef update_blocks(params, step, sL, s, _input):\n network = s['network']\n for block in _input['blocks']:\n disseminate_block(network, block.message.proposer_index, block)\n return 'network', network\n\n\ndef attest_policy(params, step, sL, s):\n network = s['network']\n produced_attestations = []\n for validator_index, validator in enumerate(network.validators):\n known_items = knowledge_set(network, validator_index)\n attestation = validator.attest(known_items)\n if attestation is not None:\n produced_attestations.append([validator_index, attestation])\n return {'attestations': produced_attestations}\n\n\ndef propose_policy(params, step, sL, s):\n network = s['network']\n produced_blocks = []\n for validator_index, validator in enumerate(network.validators):\n known_items = knowledge_set(network, validator_index)\n block = validator.propose(known_items)\n if block is not None:\n produced_blocks.append(block)\n return {'blocks': produced_blocks}\n\n\nclass SimulationParameters:\n num_epochs: uint64\n num_run: uint64\n frequency: uint64\n network_update_rate: float\n\n def __init__(self, obj):\n self.num_epochs = obj['num_epochs']\n self.num_run = obj['num_run']\n self.frequency = obj['frequency']\n self.network_update_rate = obj['network_update_rate']\n\n\ndef simulate(network: Network, parameters: SimulationParameters, observers:\n Dict[str, Callable[[BeaconState], Any]]={}) ->pd.DataFrame:\n \"\"\"\n Args:\n network (Network): Network of :py:class:`beaconrunner.validatorlib.BRValidator`\n parameters (BRSimulationParameters): Simulation parameters\n\n Returns:\n pandas.DataFrame: Results of the simulation contained in a pandas data frame\n \"\"\"\n initial_conditions = {'network': network}\n psubs = [{'policies': {'action': attest_policy}, 'variables': {\n 'network': update_attestations}}, {'policies': {'action':\n propose_policy}, 'variables': {'network': update_blocks}}, {\n 'policies': {}, 'variables': {'network': tick}}]\n num_slots = parameters.num_epochs * SLOTS_PER_EPOCH\n steps = int(num_slots * SECONDS_PER_SLOT * parameters.frequency)\n params = {'frequency': [parameters.frequency], 'network_update_rate': [\n parameters.network_update_rate]}\n print('will simulate', parameters.num_epochs, 'epochs (', num_slots,\n 'slots ) at frequency', parameters.frequency, 'moves/second')\n print('total', steps, 'simulation steps')\n observed_ic = get_observed_initial_conditions(initial_conditions, observers\n )\n observed_psubs = get_observed_psubs(psubs, observers)\n sim_config = config_sim({'T': range(steps), 'N': 1, 'M': {'frequency':\n [parameters.frequency], 'network_update_rate': [parameters.\n network_update_rate]}})\n from cadCAD import configs\n del configs[:]\n experiment = Experiment()\n experiment.append_configs(initial_state=observed_ic,\n 
partial_state_update_blocks=observed_psubs, sim_configs=sim_config)\n exec_context = ExecutionContext()\n simulation = Executor(exec_context=exec_context, configs=configs)\n raw_result, tensor, sessions = simulation.execute()\n return pd.DataFrame(raw_result)\n",
"<import token>\ndel configs[:]\n<import token>\n\n\ndef get_initial_deposits(validators):\n \"\"\"Produce deposits\n\n Args:\n validators (Sequence[BRValidator]): Validators of the simulation\n\n Returns:\n List[Deposit]: The list of deposits\n \"\"\"\n return [Deposit(data=DepositData(amount=eth_to_gwei(32), pubkey=v.\n pubkey)) for v in validators]\n\n\ndef get_genesis_state(validators, seed='hello'):\n block_hash = hash(seed.encode('utf-8'))\n eth1_timestamp = 1578009600\n return initialize_beacon_state_from_eth1(block_hash, eth1_timestamp,\n get_initial_deposits(validators))\n\n\ndef skip_genesis_block(validators):\n for validator in validators:\n validator.forward_by(SECONDS_PER_SLOT)\n\n\ndef tick(params, step, sL, s, _input):\n \"\"\"\n We call tick to move the clock by one step (= a second if frequency is 1, a tenth of a second \n if frequency is 10 etc). When tick moves the clock past the start of a new slot, validators \n update their internals, checking for instance their new attester or proposer duties if this \n tick coincides with a new epoch.\n\n Whenever tick is called, we also check whether we want the network to update or not, by \n flipping a biased coin. By \"updating the network\", we mean \"peers exchange messages\". In the \n chain example above, with 4 validators arranged as 0 <-> 1 <-> 2 <-> 3, it takes two network \n updates for a message from validator 3 to reach validator 0 (when validator 3 sends their \n message, we assume that it reaches all their peers instantly).\n\n The update frequency of the network is represented by the network_update_rate simulation \n parameter, also in Hertz. A network_update_rate of 1 means that messages spread one step \n further on the network each second.\n \"\"\"\n frequency = params['frequency']\n network_update_rate = params['network_update_rate']\n assert frequency >= network_update_rate\n network = s['network']\n update_prob = float(network_update_rate) / float(frequency)\n if random.random() < update_prob:\n update_network(network)\n for validator in network.validators:\n validator.update_time(frequency)\n if s['timestep'] % 100 == 0:\n print('timestep', s['timestep'], 'of run', s['run'])\n return 'network', network\n\n\ndef update_attestations(params, step, sL, s, _input):\n network = s['network']\n disseminate_attestations(network, _input['attestations'])\n return 'network', network\n\n\ndef update_blocks(params, step, sL, s, _input):\n network = s['network']\n for block in _input['blocks']:\n disseminate_block(network, block.message.proposer_index, block)\n return 'network', network\n\n\ndef attest_policy(params, step, sL, s):\n network = s['network']\n produced_attestations = []\n for validator_index, validator in enumerate(network.validators):\n known_items = knowledge_set(network, validator_index)\n attestation = validator.attest(known_items)\n if attestation is not None:\n produced_attestations.append([validator_index, attestation])\n return {'attestations': produced_attestations}\n\n\ndef propose_policy(params, step, sL, s):\n network = s['network']\n produced_blocks = []\n for validator_index, validator in enumerate(network.validators):\n known_items = knowledge_set(network, validator_index)\n block = validator.propose(known_items)\n if block is not None:\n produced_blocks.append(block)\n return {'blocks': produced_blocks}\n\n\nclass SimulationParameters:\n num_epochs: uint64\n num_run: uint64\n frequency: uint64\n network_update_rate: float\n\n def __init__(self, obj):\n self.num_epochs = obj['num_epochs']\n 
self.num_run = obj['num_run']\n self.frequency = obj['frequency']\n self.network_update_rate = obj['network_update_rate']\n\n\ndef simulate(network: Network, parameters: SimulationParameters, observers:\n Dict[str, Callable[[BeaconState], Any]]={}) ->pd.DataFrame:\n \"\"\"\n Args:\n network (Network): Network of :py:class:`beaconrunner.validatorlib.BRValidator`\n parameters (BRSimulationParameters): Simulation parameters\n\n Returns:\n pandas.DataFrame: Results of the simulation contained in a pandas data frame\n \"\"\"\n initial_conditions = {'network': network}\n psubs = [{'policies': {'action': attest_policy}, 'variables': {\n 'network': update_attestations}}, {'policies': {'action':\n propose_policy}, 'variables': {'network': update_blocks}}, {\n 'policies': {}, 'variables': {'network': tick}}]\n num_slots = parameters.num_epochs * SLOTS_PER_EPOCH\n steps = int(num_slots * SECONDS_PER_SLOT * parameters.frequency)\n params = {'frequency': [parameters.frequency], 'network_update_rate': [\n parameters.network_update_rate]}\n print('will simulate', parameters.num_epochs, 'epochs (', num_slots,\n 'slots ) at frequency', parameters.frequency, 'moves/second')\n print('total', steps, 'simulation steps')\n observed_ic = get_observed_initial_conditions(initial_conditions, observers\n )\n observed_psubs = get_observed_psubs(psubs, observers)\n sim_config = config_sim({'T': range(steps), 'N': 1, 'M': {'frequency':\n [parameters.frequency], 'network_update_rate': [parameters.\n network_update_rate]}})\n from cadCAD import configs\n del configs[:]\n experiment = Experiment()\n experiment.append_configs(initial_state=observed_ic,\n partial_state_update_blocks=observed_psubs, sim_configs=sim_config)\n exec_context = ExecutionContext()\n simulation = Executor(exec_context=exec_context, configs=configs)\n raw_result, tensor, sessions = simulation.execute()\n return pd.DataFrame(raw_result)\n",
"<import token>\n<code token>\n<import token>\n\n\ndef get_initial_deposits(validators):\n \"\"\"Produce deposits\n\n Args:\n validators (Sequence[BRValidator]): Validators of the simulation\n\n Returns:\n List[Deposit]: The list of deposits\n \"\"\"\n return [Deposit(data=DepositData(amount=eth_to_gwei(32), pubkey=v.\n pubkey)) for v in validators]\n\n\ndef get_genesis_state(validators, seed='hello'):\n block_hash = hash(seed.encode('utf-8'))\n eth1_timestamp = 1578009600\n return initialize_beacon_state_from_eth1(block_hash, eth1_timestamp,\n get_initial_deposits(validators))\n\n\ndef skip_genesis_block(validators):\n for validator in validators:\n validator.forward_by(SECONDS_PER_SLOT)\n\n\ndef tick(params, step, sL, s, _input):\n \"\"\"\n We call tick to move the clock by one step (= a second if frequency is 1, a tenth of a second \n if frequency is 10 etc). When tick moves the clock past the start of a new slot, validators \n update their internals, checking for instance their new attester or proposer duties if this \n tick coincides with a new epoch.\n\n Whenever tick is called, we also check whether we want the network to update or not, by \n flipping a biased coin. By \"updating the network\", we mean \"peers exchange messages\". In the \n chain example above, with 4 validators arranged as 0 <-> 1 <-> 2 <-> 3, it takes two network \n updates for a message from validator 3 to reach validator 0 (when validator 3 sends their \n message, we assume that it reaches all their peers instantly).\n\n The update frequency of the network is represented by the network_update_rate simulation \n parameter, also in Hertz. A network_update_rate of 1 means that messages spread one step \n further on the network each second.\n \"\"\"\n frequency = params['frequency']\n network_update_rate = params['network_update_rate']\n assert frequency >= network_update_rate\n network = s['network']\n update_prob = float(network_update_rate) / float(frequency)\n if random.random() < update_prob:\n update_network(network)\n for validator in network.validators:\n validator.update_time(frequency)\n if s['timestep'] % 100 == 0:\n print('timestep', s['timestep'], 'of run', s['run'])\n return 'network', network\n\n\ndef update_attestations(params, step, sL, s, _input):\n network = s['network']\n disseminate_attestations(network, _input['attestations'])\n return 'network', network\n\n\ndef update_blocks(params, step, sL, s, _input):\n network = s['network']\n for block in _input['blocks']:\n disseminate_block(network, block.message.proposer_index, block)\n return 'network', network\n\n\ndef attest_policy(params, step, sL, s):\n network = s['network']\n produced_attestations = []\n for validator_index, validator in enumerate(network.validators):\n known_items = knowledge_set(network, validator_index)\n attestation = validator.attest(known_items)\n if attestation is not None:\n produced_attestations.append([validator_index, attestation])\n return {'attestations': produced_attestations}\n\n\ndef propose_policy(params, step, sL, s):\n network = s['network']\n produced_blocks = []\n for validator_index, validator in enumerate(network.validators):\n known_items = knowledge_set(network, validator_index)\n block = validator.propose(known_items)\n if block is not None:\n produced_blocks.append(block)\n return {'blocks': produced_blocks}\n\n\nclass SimulationParameters:\n num_epochs: uint64\n num_run: uint64\n frequency: uint64\n network_update_rate: float\n\n def __init__(self, obj):\n self.num_epochs = obj['num_epochs']\n 
self.num_run = obj['num_run']\n self.frequency = obj['frequency']\n self.network_update_rate = obj['network_update_rate']\n\n\ndef simulate(network: Network, parameters: SimulationParameters, observers:\n Dict[str, Callable[[BeaconState], Any]]={}) ->pd.DataFrame:\n \"\"\"\n Args:\n network (Network): Network of :py:class:`beaconrunner.validatorlib.BRValidator`\n parameters (BRSimulationParameters): Simulation parameters\n\n Returns:\n pandas.DataFrame: Results of the simulation contained in a pandas data frame\n \"\"\"\n initial_conditions = {'network': network}\n psubs = [{'policies': {'action': attest_policy}, 'variables': {\n 'network': update_attestations}}, {'policies': {'action':\n propose_policy}, 'variables': {'network': update_blocks}}, {\n 'policies': {}, 'variables': {'network': tick}}]\n num_slots = parameters.num_epochs * SLOTS_PER_EPOCH\n steps = int(num_slots * SECONDS_PER_SLOT * parameters.frequency)\n params = {'frequency': [parameters.frequency], 'network_update_rate': [\n parameters.network_update_rate]}\n print('will simulate', parameters.num_epochs, 'epochs (', num_slots,\n 'slots ) at frequency', parameters.frequency, 'moves/second')\n print('total', steps, 'simulation steps')\n observed_ic = get_observed_initial_conditions(initial_conditions, observers\n )\n observed_psubs = get_observed_psubs(psubs, observers)\n sim_config = config_sim({'T': range(steps), 'N': 1, 'M': {'frequency':\n [parameters.frequency], 'network_update_rate': [parameters.\n network_update_rate]}})\n from cadCAD import configs\n del configs[:]\n experiment = Experiment()\n experiment.append_configs(initial_state=observed_ic,\n partial_state_update_blocks=observed_psubs, sim_configs=sim_config)\n exec_context = ExecutionContext()\n simulation = Executor(exec_context=exec_context, configs=configs)\n raw_result, tensor, sessions = simulation.execute()\n return pd.DataFrame(raw_result)\n",
"<import token>\n<code token>\n<import token>\n\n\ndef get_initial_deposits(validators):\n \"\"\"Produce deposits\n\n Args:\n validators (Sequence[BRValidator]): Validators of the simulation\n\n Returns:\n List[Deposit]: The list of deposits\n \"\"\"\n return [Deposit(data=DepositData(amount=eth_to_gwei(32), pubkey=v.\n pubkey)) for v in validators]\n\n\ndef get_genesis_state(validators, seed='hello'):\n block_hash = hash(seed.encode('utf-8'))\n eth1_timestamp = 1578009600\n return initialize_beacon_state_from_eth1(block_hash, eth1_timestamp,\n get_initial_deposits(validators))\n\n\ndef skip_genesis_block(validators):\n for validator in validators:\n validator.forward_by(SECONDS_PER_SLOT)\n\n\ndef tick(params, step, sL, s, _input):\n \"\"\"\n We call tick to move the clock by one step (= a second if frequency is 1, a tenth of a second \n if frequency is 10 etc). When tick moves the clock past the start of a new slot, validators \n update their internals, checking for instance their new attester or proposer duties if this \n tick coincides with a new epoch.\n\n Whenever tick is called, we also check whether we want the network to update or not, by \n flipping a biased coin. By \"updating the network\", we mean \"peers exchange messages\". In the \n chain example above, with 4 validators arranged as 0 <-> 1 <-> 2 <-> 3, it takes two network \n updates for a message from validator 3 to reach validator 0 (when validator 3 sends their \n message, we assume that it reaches all their peers instantly).\n\n The update frequency of the network is represented by the network_update_rate simulation \n parameter, also in Hertz. A network_update_rate of 1 means that messages spread one step \n further on the network each second.\n \"\"\"\n frequency = params['frequency']\n network_update_rate = params['network_update_rate']\n assert frequency >= network_update_rate\n network = s['network']\n update_prob = float(network_update_rate) / float(frequency)\n if random.random() < update_prob:\n update_network(network)\n for validator in network.validators:\n validator.update_time(frequency)\n if s['timestep'] % 100 == 0:\n print('timestep', s['timestep'], 'of run', s['run'])\n return 'network', network\n\n\ndef update_attestations(params, step, sL, s, _input):\n network = s['network']\n disseminate_attestations(network, _input['attestations'])\n return 'network', network\n\n\n<function token>\n\n\ndef attest_policy(params, step, sL, s):\n network = s['network']\n produced_attestations = []\n for validator_index, validator in enumerate(network.validators):\n known_items = knowledge_set(network, validator_index)\n attestation = validator.attest(known_items)\n if attestation is not None:\n produced_attestations.append([validator_index, attestation])\n return {'attestations': produced_attestations}\n\n\ndef propose_policy(params, step, sL, s):\n network = s['network']\n produced_blocks = []\n for validator_index, validator in enumerate(network.validators):\n known_items = knowledge_set(network, validator_index)\n block = validator.propose(known_items)\n if block is not None:\n produced_blocks.append(block)\n return {'blocks': produced_blocks}\n\n\nclass SimulationParameters:\n num_epochs: uint64\n num_run: uint64\n frequency: uint64\n network_update_rate: float\n\n def __init__(self, obj):\n self.num_epochs = obj['num_epochs']\n self.num_run = obj['num_run']\n self.frequency = obj['frequency']\n self.network_update_rate = obj['network_update_rate']\n\n\ndef simulate(network: Network, parameters: 
SimulationParameters, observers:\n Dict[str, Callable[[BeaconState], Any]]={}) ->pd.DataFrame:\n \"\"\"\n Args:\n network (Network): Network of :py:class:`beaconrunner.validatorlib.BRValidator`\n parameters (BRSimulationParameters): Simulation parameters\n\n Returns:\n pandas.DataFrame: Results of the simulation contained in a pandas data frame\n \"\"\"\n initial_conditions = {'network': network}\n psubs = [{'policies': {'action': attest_policy}, 'variables': {\n 'network': update_attestations}}, {'policies': {'action':\n propose_policy}, 'variables': {'network': update_blocks}}, {\n 'policies': {}, 'variables': {'network': tick}}]\n num_slots = parameters.num_epochs * SLOTS_PER_EPOCH\n steps = int(num_slots * SECONDS_PER_SLOT * parameters.frequency)\n params = {'frequency': [parameters.frequency], 'network_update_rate': [\n parameters.network_update_rate]}\n print('will simulate', parameters.num_epochs, 'epochs (', num_slots,\n 'slots ) at frequency', parameters.frequency, 'moves/second')\n print('total', steps, 'simulation steps')\n observed_ic = get_observed_initial_conditions(initial_conditions, observers\n )\n observed_psubs = get_observed_psubs(psubs, observers)\n sim_config = config_sim({'T': range(steps), 'N': 1, 'M': {'frequency':\n [parameters.frequency], 'network_update_rate': [parameters.\n network_update_rate]}})\n from cadCAD import configs\n del configs[:]\n experiment = Experiment()\n experiment.append_configs(initial_state=observed_ic,\n partial_state_update_blocks=observed_psubs, sim_configs=sim_config)\n exec_context = ExecutionContext()\n simulation = Executor(exec_context=exec_context, configs=configs)\n raw_result, tensor, sessions = simulation.execute()\n return pd.DataFrame(raw_result)\n",
"<import token>\n<code token>\n<import token>\n\n\ndef get_initial_deposits(validators):\n \"\"\"Produce deposits\n\n Args:\n validators (Sequence[BRValidator]): Validators of the simulation\n\n Returns:\n List[Deposit]: The list of deposits\n \"\"\"\n return [Deposit(data=DepositData(amount=eth_to_gwei(32), pubkey=v.\n pubkey)) for v in validators]\n\n\ndef get_genesis_state(validators, seed='hello'):\n block_hash = hash(seed.encode('utf-8'))\n eth1_timestamp = 1578009600\n return initialize_beacon_state_from_eth1(block_hash, eth1_timestamp,\n get_initial_deposits(validators))\n\n\ndef skip_genesis_block(validators):\n for validator in validators:\n validator.forward_by(SECONDS_PER_SLOT)\n\n\n<function token>\n\n\ndef update_attestations(params, step, sL, s, _input):\n network = s['network']\n disseminate_attestations(network, _input['attestations'])\n return 'network', network\n\n\n<function token>\n\n\ndef attest_policy(params, step, sL, s):\n network = s['network']\n produced_attestations = []\n for validator_index, validator in enumerate(network.validators):\n known_items = knowledge_set(network, validator_index)\n attestation = validator.attest(known_items)\n if attestation is not None:\n produced_attestations.append([validator_index, attestation])\n return {'attestations': produced_attestations}\n\n\ndef propose_policy(params, step, sL, s):\n network = s['network']\n produced_blocks = []\n for validator_index, validator in enumerate(network.validators):\n known_items = knowledge_set(network, validator_index)\n block = validator.propose(known_items)\n if block is not None:\n produced_blocks.append(block)\n return {'blocks': produced_blocks}\n\n\nclass SimulationParameters:\n num_epochs: uint64\n num_run: uint64\n frequency: uint64\n network_update_rate: float\n\n def __init__(self, obj):\n self.num_epochs = obj['num_epochs']\n self.num_run = obj['num_run']\n self.frequency = obj['frequency']\n self.network_update_rate = obj['network_update_rate']\n\n\ndef simulate(network: Network, parameters: SimulationParameters, observers:\n Dict[str, Callable[[BeaconState], Any]]={}) ->pd.DataFrame:\n \"\"\"\n Args:\n network (Network): Network of :py:class:`beaconrunner.validatorlib.BRValidator`\n parameters (BRSimulationParameters): Simulation parameters\n\n Returns:\n pandas.DataFrame: Results of the simulation contained in a pandas data frame\n \"\"\"\n initial_conditions = {'network': network}\n psubs = [{'policies': {'action': attest_policy}, 'variables': {\n 'network': update_attestations}}, {'policies': {'action':\n propose_policy}, 'variables': {'network': update_blocks}}, {\n 'policies': {}, 'variables': {'network': tick}}]\n num_slots = parameters.num_epochs * SLOTS_PER_EPOCH\n steps = int(num_slots * SECONDS_PER_SLOT * parameters.frequency)\n params = {'frequency': [parameters.frequency], 'network_update_rate': [\n parameters.network_update_rate]}\n print('will simulate', parameters.num_epochs, 'epochs (', num_slots,\n 'slots ) at frequency', parameters.frequency, 'moves/second')\n print('total', steps, 'simulation steps')\n observed_ic = get_observed_initial_conditions(initial_conditions, observers\n )\n observed_psubs = get_observed_psubs(psubs, observers)\n sim_config = config_sim({'T': range(steps), 'N': 1, 'M': {'frequency':\n [parameters.frequency], 'network_update_rate': [parameters.\n network_update_rate]}})\n from cadCAD import configs\n del configs[:]\n experiment = Experiment()\n experiment.append_configs(initial_state=observed_ic,\n 
partial_state_update_blocks=observed_psubs, sim_configs=sim_config)\n exec_context = ExecutionContext()\n simulation = Executor(exec_context=exec_context, configs=configs)\n raw_result, tensor, sessions = simulation.execute()\n return pd.DataFrame(raw_result)\n",
"<import token>\n<code token>\n<import token>\n\n\ndef get_initial_deposits(validators):\n \"\"\"Produce deposits\n\n Args:\n validators (Sequence[BRValidator]): Validators of the simulation\n\n Returns:\n List[Deposit]: The list of deposits\n \"\"\"\n return [Deposit(data=DepositData(amount=eth_to_gwei(32), pubkey=v.\n pubkey)) for v in validators]\n\n\ndef get_genesis_state(validators, seed='hello'):\n block_hash = hash(seed.encode('utf-8'))\n eth1_timestamp = 1578009600\n return initialize_beacon_state_from_eth1(block_hash, eth1_timestamp,\n get_initial_deposits(validators))\n\n\ndef skip_genesis_block(validators):\n for validator in validators:\n validator.forward_by(SECONDS_PER_SLOT)\n\n\n<function token>\n\n\ndef update_attestations(params, step, sL, s, _input):\n network = s['network']\n disseminate_attestations(network, _input['attestations'])\n return 'network', network\n\n\n<function token>\n\n\ndef attest_policy(params, step, sL, s):\n network = s['network']\n produced_attestations = []\n for validator_index, validator in enumerate(network.validators):\n known_items = knowledge_set(network, validator_index)\n attestation = validator.attest(known_items)\n if attestation is not None:\n produced_attestations.append([validator_index, attestation])\n return {'attestations': produced_attestations}\n\n\ndef propose_policy(params, step, sL, s):\n network = s['network']\n produced_blocks = []\n for validator_index, validator in enumerate(network.validators):\n known_items = knowledge_set(network, validator_index)\n block = validator.propose(known_items)\n if block is not None:\n produced_blocks.append(block)\n return {'blocks': produced_blocks}\n\n\nclass SimulationParameters:\n num_epochs: uint64\n num_run: uint64\n frequency: uint64\n network_update_rate: float\n\n def __init__(self, obj):\n self.num_epochs = obj['num_epochs']\n self.num_run = obj['num_run']\n self.frequency = obj['frequency']\n self.network_update_rate = obj['network_update_rate']\n\n\n<function token>\n",
"<import token>\n<code token>\n<import token>\n\n\ndef get_initial_deposits(validators):\n \"\"\"Produce deposits\n\n Args:\n validators (Sequence[BRValidator]): Validators of the simulation\n\n Returns:\n List[Deposit]: The list of deposits\n \"\"\"\n return [Deposit(data=DepositData(amount=eth_to_gwei(32), pubkey=v.\n pubkey)) for v in validators]\n\n\ndef get_genesis_state(validators, seed='hello'):\n block_hash = hash(seed.encode('utf-8'))\n eth1_timestamp = 1578009600\n return initialize_beacon_state_from_eth1(block_hash, eth1_timestamp,\n get_initial_deposits(validators))\n\n\ndef skip_genesis_block(validators):\n for validator in validators:\n validator.forward_by(SECONDS_PER_SLOT)\n\n\n<function token>\n\n\ndef update_attestations(params, step, sL, s, _input):\n network = s['network']\n disseminate_attestations(network, _input['attestations'])\n return 'network', network\n\n\n<function token>\n\n\ndef attest_policy(params, step, sL, s):\n network = s['network']\n produced_attestations = []\n for validator_index, validator in enumerate(network.validators):\n known_items = knowledge_set(network, validator_index)\n attestation = validator.attest(known_items)\n if attestation is not None:\n produced_attestations.append([validator_index, attestation])\n return {'attestations': produced_attestations}\n\n\n<function token>\n\n\nclass SimulationParameters:\n num_epochs: uint64\n num_run: uint64\n frequency: uint64\n network_update_rate: float\n\n def __init__(self, obj):\n self.num_epochs = obj['num_epochs']\n self.num_run = obj['num_run']\n self.frequency = obj['frequency']\n self.network_update_rate = obj['network_update_rate']\n\n\n<function token>\n",
"<import token>\n<code token>\n<import token>\n\n\ndef get_initial_deposits(validators):\n \"\"\"Produce deposits\n\n Args:\n validators (Sequence[BRValidator]): Validators of the simulation\n\n Returns:\n List[Deposit]: The list of deposits\n \"\"\"\n return [Deposit(data=DepositData(amount=eth_to_gwei(32), pubkey=v.\n pubkey)) for v in validators]\n\n\ndef get_genesis_state(validators, seed='hello'):\n block_hash = hash(seed.encode('utf-8'))\n eth1_timestamp = 1578009600\n return initialize_beacon_state_from_eth1(block_hash, eth1_timestamp,\n get_initial_deposits(validators))\n\n\n<function token>\n<function token>\n\n\ndef update_attestations(params, step, sL, s, _input):\n network = s['network']\n disseminate_attestations(network, _input['attestations'])\n return 'network', network\n\n\n<function token>\n\n\ndef attest_policy(params, step, sL, s):\n network = s['network']\n produced_attestations = []\n for validator_index, validator in enumerate(network.validators):\n known_items = knowledge_set(network, validator_index)\n attestation = validator.attest(known_items)\n if attestation is not None:\n produced_attestations.append([validator_index, attestation])\n return {'attestations': produced_attestations}\n\n\n<function token>\n\n\nclass SimulationParameters:\n num_epochs: uint64\n num_run: uint64\n frequency: uint64\n network_update_rate: float\n\n def __init__(self, obj):\n self.num_epochs = obj['num_epochs']\n self.num_run = obj['num_run']\n self.frequency = obj['frequency']\n self.network_update_rate = obj['network_update_rate']\n\n\n<function token>\n",
"<import token>\n<code token>\n<import token>\n\n\ndef get_initial_deposits(validators):\n \"\"\"Produce deposits\n\n Args:\n validators (Sequence[BRValidator]): Validators of the simulation\n\n Returns:\n List[Deposit]: The list of deposits\n \"\"\"\n return [Deposit(data=DepositData(amount=eth_to_gwei(32), pubkey=v.\n pubkey)) for v in validators]\n\n\ndef get_genesis_state(validators, seed='hello'):\n block_hash = hash(seed.encode('utf-8'))\n eth1_timestamp = 1578009600\n return initialize_beacon_state_from_eth1(block_hash, eth1_timestamp,\n get_initial_deposits(validators))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef attest_policy(params, step, sL, s):\n network = s['network']\n produced_attestations = []\n for validator_index, validator in enumerate(network.validators):\n known_items = knowledge_set(network, validator_index)\n attestation = validator.attest(known_items)\n if attestation is not None:\n produced_attestations.append([validator_index, attestation])\n return {'attestations': produced_attestations}\n\n\n<function token>\n\n\nclass SimulationParameters:\n num_epochs: uint64\n num_run: uint64\n frequency: uint64\n network_update_rate: float\n\n def __init__(self, obj):\n self.num_epochs = obj['num_epochs']\n self.num_run = obj['num_run']\n self.frequency = obj['frequency']\n self.network_update_rate = obj['network_update_rate']\n\n\n<function token>\n",
"<import token>\n<code token>\n<import token>\n\n\ndef get_initial_deposits(validators):\n \"\"\"Produce deposits\n\n Args:\n validators (Sequence[BRValidator]): Validators of the simulation\n\n Returns:\n List[Deposit]: The list of deposits\n \"\"\"\n return [Deposit(data=DepositData(amount=eth_to_gwei(32), pubkey=v.\n pubkey)) for v in validators]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef attest_policy(params, step, sL, s):\n network = s['network']\n produced_attestations = []\n for validator_index, validator in enumerate(network.validators):\n known_items = knowledge_set(network, validator_index)\n attestation = validator.attest(known_items)\n if attestation is not None:\n produced_attestations.append([validator_index, attestation])\n return {'attestations': produced_attestations}\n\n\n<function token>\n\n\nclass SimulationParameters:\n num_epochs: uint64\n num_run: uint64\n frequency: uint64\n network_update_rate: float\n\n def __init__(self, obj):\n self.num_epochs = obj['num_epochs']\n self.num_run = obj['num_run']\n self.frequency = obj['frequency']\n self.network_update_rate = obj['network_update_rate']\n\n\n<function token>\n",
"<import token>\n<code token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef attest_policy(params, step, sL, s):\n network = s['network']\n produced_attestations = []\n for validator_index, validator in enumerate(network.validators):\n known_items = knowledge_set(network, validator_index)\n attestation = validator.attest(known_items)\n if attestation is not None:\n produced_attestations.append([validator_index, attestation])\n return {'attestations': produced_attestations}\n\n\n<function token>\n\n\nclass SimulationParameters:\n num_epochs: uint64\n num_run: uint64\n frequency: uint64\n network_update_rate: float\n\n def __init__(self, obj):\n self.num_epochs = obj['num_epochs']\n self.num_run = obj['num_run']\n self.frequency = obj['frequency']\n self.network_update_rate = obj['network_update_rate']\n\n\n<function token>\n",
"<import token>\n<code token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass SimulationParameters:\n num_epochs: uint64\n num_run: uint64\n frequency: uint64\n network_update_rate: float\n\n def __init__(self, obj):\n self.num_epochs = obj['num_epochs']\n self.num_run = obj['num_run']\n self.frequency = obj['frequency']\n self.network_update_rate = obj['network_update_rate']\n\n\n<function token>\n",
"<import token>\n<code token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass SimulationParameters:\n num_epochs: uint64\n num_run: uint64\n frequency: uint64\n network_update_rate: float\n <function token>\n\n\n<function token>\n",
"<import token>\n<code token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<class token>\n<function token>\n"
] | false |
98,615 |
b8cb14ab88be083345481b7d3b5362e47aacb28c
|
import threading
import time
import queue
import cwipc
import cwipc.codec
from typing import Optional, List, Any
from .abstract import VRT_4CC, vrt_fourcc_type, cwipc_producer_abstract, cwipc_rawsink_abstract, cwipc_sink_abstract
class _Sink_Encoder(threading.Thread, cwipc_sink_abstract):
"""A pointcloud sink that compresses pointclouds and forwards them to a rawsink."""
FOURCC="cwi1"
SELECT_TIMEOUT=0.1
QUEUE_FULL_TIMEOUT=0.001
sink : cwipc_rawsink_abstract
input_queue : queue.Queue[cwipc.cwipc_wrapper]
pointcounts : List[int]
tiledescriptions : List[cwipc.cwipc_tileinfo_pythonic]
encoder_group : Optional[cwipc.codec.cwipc_encodergroup_wrapper]
encoders : List[cwipc.codec.cwipc_encoder_wrapper]
times_encode : List[float]
# xxxjack the Any for sink is a cop-out. Need to define ABCs for all the types.
def __init__(self, sink : cwipc_rawsink_abstract, verbose : bool=False, nodrop : bool=False):
threading.Thread.__init__(self)
self.name = 'cwipc_util._Sink_Encoder'
self.sink = sink
self.sink.set_fourcc(self.FOURCC)
self.producer = None
self.nodrop = nodrop
self.input_queue = queue.Queue(maxsize=2)
self.verbose = verbose
self.nodrop = nodrop
self.stopped = False
self.started = False
self.times_encode = []
self.pointcounts = []
self.encoder_group = None
self.encoders = []
self.tiledescriptions = [{}]
self.octree_bits = None
self.jpeg_quality = None
def set_encoder_params(self, tiles : Optional[List[cwipc.cwipc_tileinfo_pythonic]] = None, octree_bits : Optional[int]=None, jpeg_quality : Optional[int]=None) -> None:
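        """Set the tile descriptions, octree depth(s) and JPEG quality (single values or lists) used by the encoders; call before start()."""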
if tiles == None: tiles = [{}]
self.tiledescriptions = tiles
self.octree_bits = octree_bits
self.jpeg_quality = jpeg_quality
def start(self) -> None:
self._init_encoders()
threading.Thread.start(self)
self.sink.start()
self.started = True
def stop(self) -> None:
if self.verbose: print(f"encoder: stopping thread")
self.stopped = True
self.sink.stop()
if self.started:
self.join()
def set_producer(self, producer : Any) -> None:
self.producer = producer
self.sink.set_producer(producer)
def is_alive(self):
return not self.stopped
def run(self):
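        """Thread body: pull pointclouds from the input queue, encode them and forward the resulting packets to the sink."""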
assert self.encoder_group
if self.verbose: print(f"encoder: thread started")
try:
while not self.stopped and self.producer and self.producer.is_alive():
pc = self.input_queue.get()
if not pc:
print(f"encoder: get() returned None")
continue
self.pointcounts.append(pc.count())
t1 = time.time()
self.encoder_group.feed(pc)
packets : List[bytearray] = []
for i in range(len(self.encoders)):
got_data = self.encoders[i].available(True)
assert got_data
cpc = self.encoders[i].get_bytes()
packets.append(cpc)
t2 = time.time()
if len(packets) == 1:
self.sink.feed(packets[0])
else:
for i in range(len(packets)):
self.sink.feed(packets[i], stream_index=i)
pc.free()
self.times_encode.append(t2-t1)
finally:
self.stopped = True
if self.verbose: print(f"encoder: thread stopping")
def feed(self, pc : cwipc.cwipc_wrapper) -> None:
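        """Queue a pointcloud for encoding; if the queue is full and nodrop is not set, drop and free the pointcloud."""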
try:
if self.nodrop:
self.input_queue.put(pc)
else:
self.input_queue.put(pc, timeout=self.QUEUE_FULL_TIMEOUT)
except queue.Full:
if self.verbose: print(f"encoder: queue full, drop pointcloud")
pc.free()
def _init_encoders(self):
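        """Create one encoder per (tile, octree_bits, jpeg_quality) combination and register the corresponding streams with the sink."""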
if not self.octree_bits:
self.octree_bits = 9
if type(self.octree_bits) != type([]):
self.octree_bits = [self.octree_bits]
if not self.jpeg_quality:
self.jpeg_quality = 85
if type(self.jpeg_quality) != type([]):
self.jpeg_quality = [self.jpeg_quality]
voxelsize = 0
if self.verbose:
print(f'encoder: creating {len(self.tiledescriptions)*len(self.octree_bits)*len(self.jpeg_quality)} encoders/streams')
self.encoder_group = cwipc.codec.cwipc_new_encodergroup()
for tile in range(len(self.tiledescriptions)):
for octree_bits in self.octree_bits:
for jpeg_quality in self.jpeg_quality:
srctile = self.tiledescriptions[tile].get('ncamera', tile)
encparams = cwipc.codec.cwipc_encoder_params(False, 1, 1.0, octree_bits, jpeg_quality, 16, srctile, voxelsize)
encoder = self.encoder_group.addencoder(params=encparams)
self.encoders.append(encoder)
if hasattr(self.sink, 'add_streamDesc'):
# Our sink can handle multiple tiles/quality streams.
# Initialize to the best of our knowledge
if not 'normal' in self.tiledescriptions[tile]:
print(f'encoder: warning: tile {tile} description has no normal vector: {self.tiledescriptions[tile]}')
normal = self.tiledescriptions[tile].get("normal", dict(x=0, y=0, z=0))
streamNum = self.sink.add_streamDesc(tile, normal['x'], normal['y'], normal['z']) # type: ignore
if self.verbose:
print(f'encoder: streamNum={streamNum}, tile={tile}, srctile={srctile}, normal={normal}, octree_bits={octree_bits}, jpeg_quality={jpeg_quality}')
else:
# Single stream sink.
streamNum = 0
assert streamNum == len(self.encoders)-1 # Fails if multi-stream not supported by network sink.
def statistics(self):
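        """Print encode-duration and point-count statistics, plus the sink's own statistics if it provides them."""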
self.print1stat('encode_duration', self.times_encode)
self.print1stat('pointcount', self.pointcounts)
if hasattr(self.sink, 'statistics'):
self.sink.statistics()
def print1stat(self, name, values, isInt=False):
count = len(values)
if count == 0:
print('encoder: {}: count=0'.format(name))
return
minValue = min(values)
maxValue = max(values)
avgValue = sum(values) / count
if isInt:
fmtstring = 'encoder: {}: count={}, average={:.3f}, min={:d}, max={:d}'
else:
fmtstring = 'encoder: {}: count={}, average={:.3f}, min={:.3f}, max={:.3f}'
print(fmtstring.format(name, count, avgValue, minValue, maxValue))
def cwipc_sink_encoder(sink : cwipc_rawsink_abstract, verbose : bool=False, nodrop : bool=False) -> cwipc_sink_abstract:
"""Create a cwipc_sink object that compresses pointclouds and forward them to a rawsink."""
if cwipc.codec == None:
raise RuntimeError("cwipc_sink_encoder: requires cwipc.codec with is not available")
return _Sink_Encoder(sink, verbose=verbose, nodrop=nodrop)
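
# Minimal usage sketch (illustrative only; `my_rawsink` and `my_producer` are hypothetical
# stand-ins for a cwipc_rawsink_abstract implementation and a pointcloud producer):
#
#   sink = cwipc_sink_encoder(my_rawsink, verbose=True)
#   sink.set_encoder_params(octree_bits=9, jpeg_quality=85)
#   sink.set_producer(my_producer)
#   sink.start()
#   ... the producer calls sink.feed(pc) for each captured pointcloud ...
#   sink.stop()
#   sink.statistics()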
|
[
"import threading\nimport time\nimport queue\nimport cwipc\nimport cwipc.codec\nfrom typing import Optional, List, Any\nfrom .abstract import VRT_4CC, vrt_fourcc_type, cwipc_producer_abstract, cwipc_rawsink_abstract, cwipc_sink_abstract\n\nclass _Sink_Encoder(threading.Thread, cwipc_sink_abstract):\n \"\"\"A pointcloud sink that compresses pointclouds and forwards them to a rawsink.\"\"\"\n \n FOURCC=\"cwi1\"\n SELECT_TIMEOUT=0.1\n QUEUE_FULL_TIMEOUT=0.001\n\n sink : cwipc_rawsink_abstract\n input_queue : queue.Queue[cwipc.cwipc_wrapper]\n pointcounts : List[int]\n tiledescriptions : List[cwipc.cwipc_tileinfo_pythonic]\n encoder_group : Optional[cwipc.codec.cwipc_encodergroup_wrapper]\n encoders : List[cwipc.codec.cwipc_encoder_wrapper]\n times_encode : List[float]\n\n # xxxjack the Any for sink is a cop-out. Need to define ABCs for all the types.\n def __init__(self, sink : cwipc_rawsink_abstract, verbose : bool=False, nodrop : bool=False):\n threading.Thread.__init__(self)\n self.name = 'cwipc_util._Sink_Encoder'\n self.sink = sink\n self.sink.set_fourcc(self.FOURCC)\n self.producer = None\n self.nodrop = nodrop\n self.input_queue = queue.Queue(maxsize=2)\n self.verbose = verbose\n self.nodrop = nodrop\n self.stopped = False\n self.started = False\n self.times_encode = []\n self.pointcounts = []\n self.encoder_group = None\n self.encoders = []\n \n self.tiledescriptions = [{}]\n self.octree_bits = None\n self.jpeg_quality = None\n \n def set_encoder_params(self, tiles : Optional[List[cwipc.cwipc_tileinfo_pythonic]] = None, octree_bits : Optional[int]=None, jpeg_quality : Optional[int]=None) -> None:\n if tiles == None: tiles = [{}]\n self.tiledescriptions = tiles\n self.octree_bits = octree_bits\n self.jpeg_quality = jpeg_quality\n \n def start(self) -> None:\n self._init_encoders()\n threading.Thread.start(self)\n self.sink.start()\n self.started = True\n \n def stop(self) -> None:\n if self.verbose: print(f\"encoder: stopping thread\")\n self.stopped = True\n self.sink.stop()\n if self.started:\n self.join()\n \n def set_producer(self, producer : Any) -> None:\n self.producer = producer\n self.sink.set_producer(producer)\n \n def is_alive(self):\n return not self.stopped \n \n def run(self):\n assert self.encoder_group\n if self.verbose: print(f\"encoder: thread started\")\n try:\n while not self.stopped and self.producer and self.producer.is_alive():\n pc = self.input_queue.get()\n if not pc:\n print(f\"encoder: get() returned None\")\n continue\n self.pointcounts.append(pc.count())\n \n t1 = time.time()\n self.encoder_group.feed(pc)\n packets : List[bytearray] = []\n for i in range(len(self.encoders)):\n got_data = self.encoders[i].available(True)\n assert got_data\n cpc = self.encoders[i].get_bytes()\n packets.append(cpc)\n t2 = time.time()\n \n if len(packets) == 1:\n self.sink.feed(packets[0])\n else:\n for i in range(len(packets)):\n self.sink.feed(packets[i], stream_index=i)\n pc.free()\n self.times_encode.append(t2-t1)\n finally:\n self.stopped = True\n if self.verbose: print(f\"encoder: thread stopping\")\n \n def feed(self, pc : cwipc.cwipc_wrapper) -> None:\n try:\n if self.nodrop:\n self.input_queue.put(pc)\n else:\n self.input_queue.put(pc, timeout=self.QUEUE_FULL_TIMEOUT)\n except queue.Full:\n if self.verbose: print(f\"encoder: queue full, drop pointcloud\")\n pc.free()\n \n def _init_encoders(self):\n if not self.octree_bits:\n self.octree_bits = 9\n if type(self.octree_bits) != type([]):\n self.octree_bits = [self.octree_bits]\n \n if not self.jpeg_quality:\n 
self.jpeg_quality = 85\n if type(self.jpeg_quality) != type([]):\n self.jpeg_quality = [self.jpeg_quality]\n \n voxelsize = 0\n \n if self.verbose:\n print(f'encoder: creating {len(self.tiledescriptions)*len(self.octree_bits)*len(self.jpeg_quality)} encoders/streams')\n \n self.encoder_group = cwipc.codec.cwipc_new_encodergroup()\n for tile in range(len(self.tiledescriptions)):\n for octree_bits in self.octree_bits:\n for jpeg_quality in self.jpeg_quality:\n srctile = self.tiledescriptions[tile].get('ncamera', tile)\n encparams = cwipc.codec.cwipc_encoder_params(False, 1, 1.0, octree_bits, jpeg_quality, 16, srctile, voxelsize)\n encoder = self.encoder_group.addencoder(params=encparams)\n self.encoders.append(encoder)\n if hasattr(self.sink, 'add_streamDesc'):\n # Our sink can handle multiple tiles/quality streams.\n # Initialize to the best of our knowledge\n if not 'normal' in self.tiledescriptions[tile]:\n print(f'encoder: warning: tile {tile} description has no normal vector: {self.tiledescriptions[tile]}')\n normal = self.tiledescriptions[tile].get(\"normal\", dict(x=0, y=0, z=0))\n streamNum = self.sink.add_streamDesc(tile, normal['x'], normal['y'], normal['z']) # type: ignore\n if self.verbose:\n print(f'encoder: streamNum={streamNum}, tile={tile}, srctile={srctile}, normal={normal}, octree_bits={octree_bits}, jpeg_quality={jpeg_quality}')\n else:\n # Single stream sink.\n streamNum = 0\n assert streamNum == len(self.encoders)-1 # Fails if multi-stream not supported by network sink.\n\n def statistics(self):\n self.print1stat('encode_duration', self.times_encode)\n self.print1stat('pointcount', self.pointcounts)\n if hasattr(self.sink, 'statistics'):\n self.sink.statistics()\n \n def print1stat(self, name, values, isInt=False):\n count = len(values)\n if count == 0:\n print('encoder: {}: count=0'.format(name))\n return\n minValue = min(values)\n maxValue = max(values)\n avgValue = sum(values) / count\n if isInt:\n fmtstring = 'encoder: {}: count={}, average={:.3f}, min={:d}, max={:d}'\n else:\n fmtstring = 'encoder: {}: count={}, average={:.3f}, min={:.3f}, max={:.3f}'\n print(fmtstring.format(name, count, avgValue, minValue, maxValue))\n\ndef cwipc_sink_encoder(sink : cwipc_rawsink_abstract, verbose : bool=False, nodrop : bool=False) -> cwipc_sink_abstract:\n \"\"\"Create a cwipc_sink object that compresses pointclouds and forward them to a rawsink.\"\"\"\n if cwipc.codec == None:\n raise RuntimeError(\"cwipc_sink_encoder: requires cwipc.codec with is not available\")\n return _Sink_Encoder(sink, verbose=verbose, nodrop=nodrop)",
"import threading\nimport time\nimport queue\nimport cwipc\nimport cwipc.codec\nfrom typing import Optional, List, Any\nfrom .abstract import VRT_4CC, vrt_fourcc_type, cwipc_producer_abstract, cwipc_rawsink_abstract, cwipc_sink_abstract\n\n\nclass _Sink_Encoder(threading.Thread, cwipc_sink_abstract):\n \"\"\"A pointcloud sink that compresses pointclouds and forwards them to a rawsink.\"\"\"\n FOURCC = 'cwi1'\n SELECT_TIMEOUT = 0.1\n QUEUE_FULL_TIMEOUT = 0.001\n sink: cwipc_rawsink_abstract\n input_queue: queue.Queue[cwipc.cwipc_wrapper]\n pointcounts: List[int]\n tiledescriptions: List[cwipc.cwipc_tileinfo_pythonic]\n encoder_group: Optional[cwipc.codec.cwipc_encodergroup_wrapper]\n encoders: List[cwipc.codec.cwipc_encoder_wrapper]\n times_encode: List[float]\n\n def __init__(self, sink: cwipc_rawsink_abstract, verbose: bool=False,\n nodrop: bool=False):\n threading.Thread.__init__(self)\n self.name = 'cwipc_util._Sink_Encoder'\n self.sink = sink\n self.sink.set_fourcc(self.FOURCC)\n self.producer = None\n self.nodrop = nodrop\n self.input_queue = queue.Queue(maxsize=2)\n self.verbose = verbose\n self.nodrop = nodrop\n self.stopped = False\n self.started = False\n self.times_encode = []\n self.pointcounts = []\n self.encoder_group = None\n self.encoders = []\n self.tiledescriptions = [{}]\n self.octree_bits = None\n self.jpeg_quality = None\n\n def set_encoder_params(self, tiles: Optional[List[cwipc.\n cwipc_tileinfo_pythonic]]=None, octree_bits: Optional[int]=None,\n jpeg_quality: Optional[int]=None) ->None:\n if tiles == None:\n tiles = [{}]\n self.tiledescriptions = tiles\n self.octree_bits = octree_bits\n self.jpeg_quality = jpeg_quality\n\n def start(self) ->None:\n self._init_encoders()\n threading.Thread.start(self)\n self.sink.start()\n self.started = True\n\n def stop(self) ->None:\n if self.verbose:\n print(f'encoder: stopping thread')\n self.stopped = True\n self.sink.stop()\n if self.started:\n self.join()\n\n def set_producer(self, producer: Any) ->None:\n self.producer = producer\n self.sink.set_producer(producer)\n\n def is_alive(self):\n return not self.stopped\n\n def run(self):\n assert self.encoder_group\n if self.verbose:\n print(f'encoder: thread started')\n try:\n while (not self.stopped and self.producer and self.producer.\n is_alive()):\n pc = self.input_queue.get()\n if not pc:\n print(f'encoder: get() returned None')\n continue\n self.pointcounts.append(pc.count())\n t1 = time.time()\n self.encoder_group.feed(pc)\n packets: List[bytearray] = []\n for i in range(len(self.encoders)):\n got_data = self.encoders[i].available(True)\n assert got_data\n cpc = self.encoders[i].get_bytes()\n packets.append(cpc)\n t2 = time.time()\n if len(packets) == 1:\n self.sink.feed(packets[0])\n else:\n for i in range(len(packets)):\n self.sink.feed(packets[i], stream_index=i)\n pc.free()\n self.times_encode.append(t2 - t1)\n finally:\n self.stopped = True\n if self.verbose:\n print(f'encoder: thread stopping')\n\n def feed(self, pc: cwipc.cwipc_wrapper) ->None:\n try:\n if self.nodrop:\n self.input_queue.put(pc)\n else:\n self.input_queue.put(pc, timeout=self.QUEUE_FULL_TIMEOUT)\n except queue.Full:\n if self.verbose:\n print(f'encoder: queue full, drop pointcloud')\n pc.free()\n\n def _init_encoders(self):\n if not self.octree_bits:\n self.octree_bits = 9\n if type(self.octree_bits) != type([]):\n self.octree_bits = [self.octree_bits]\n if not self.jpeg_quality:\n self.jpeg_quality = 85\n if type(self.jpeg_quality) != type([]):\n self.jpeg_quality = [self.jpeg_quality]\n voxelsize = 
0\n if self.verbose:\n print(\n f'encoder: creating {len(self.tiledescriptions) * len(self.octree_bits) * len(self.jpeg_quality)} encoders/streams'\n )\n self.encoder_group = cwipc.codec.cwipc_new_encodergroup()\n for tile in range(len(self.tiledescriptions)):\n for octree_bits in self.octree_bits:\n for jpeg_quality in self.jpeg_quality:\n srctile = self.tiledescriptions[tile].get('ncamera', tile)\n encparams = cwipc.codec.cwipc_encoder_params(False, 1, \n 1.0, octree_bits, jpeg_quality, 16, srctile, voxelsize)\n encoder = self.encoder_group.addencoder(params=encparams)\n self.encoders.append(encoder)\n if hasattr(self.sink, 'add_streamDesc'):\n if not 'normal' in self.tiledescriptions[tile]:\n print(\n f'encoder: warning: tile {tile} description has no normal vector: {self.tiledescriptions[tile]}'\n )\n normal = self.tiledescriptions[tile].get('normal',\n dict(x=0, y=0, z=0))\n streamNum = self.sink.add_streamDesc(tile, normal[\n 'x'], normal['y'], normal['z'])\n if self.verbose:\n print(\n f'encoder: streamNum={streamNum}, tile={tile}, srctile={srctile}, normal={normal}, octree_bits={octree_bits}, jpeg_quality={jpeg_quality}'\n )\n else:\n streamNum = 0\n assert streamNum == len(self.encoders) - 1\n\n def statistics(self):\n self.print1stat('encode_duration', self.times_encode)\n self.print1stat('pointcount', self.pointcounts)\n if hasattr(self.sink, 'statistics'):\n self.sink.statistics()\n\n def print1stat(self, name, values, isInt=False):\n count = len(values)\n if count == 0:\n print('encoder: {}: count=0'.format(name))\n return\n minValue = min(values)\n maxValue = max(values)\n avgValue = sum(values) / count\n if isInt:\n fmtstring = (\n 'encoder: {}: count={}, average={:.3f}, min={:d}, max={:d}')\n else:\n fmtstring = (\n 'encoder: {}: count={}, average={:.3f}, min={:.3f}, max={:.3f}'\n )\n print(fmtstring.format(name, count, avgValue, minValue, maxValue))\n\n\ndef cwipc_sink_encoder(sink: cwipc_rawsink_abstract, verbose: bool=False,\n nodrop: bool=False) ->cwipc_sink_abstract:\n \"\"\"Create a cwipc_sink object that compresses pointclouds and forward them to a rawsink.\"\"\"\n if cwipc.codec == None:\n raise RuntimeError(\n 'cwipc_sink_encoder: requires cwipc.codec with is not available')\n return _Sink_Encoder(sink, verbose=verbose, nodrop=nodrop)\n",
"<import token>\n\n\nclass _Sink_Encoder(threading.Thread, cwipc_sink_abstract):\n \"\"\"A pointcloud sink that compresses pointclouds and forwards them to a rawsink.\"\"\"\n FOURCC = 'cwi1'\n SELECT_TIMEOUT = 0.1\n QUEUE_FULL_TIMEOUT = 0.001\n sink: cwipc_rawsink_abstract\n input_queue: queue.Queue[cwipc.cwipc_wrapper]\n pointcounts: List[int]\n tiledescriptions: List[cwipc.cwipc_tileinfo_pythonic]\n encoder_group: Optional[cwipc.codec.cwipc_encodergroup_wrapper]\n encoders: List[cwipc.codec.cwipc_encoder_wrapper]\n times_encode: List[float]\n\n def __init__(self, sink: cwipc_rawsink_abstract, verbose: bool=False,\n nodrop: bool=False):\n threading.Thread.__init__(self)\n self.name = 'cwipc_util._Sink_Encoder'\n self.sink = sink\n self.sink.set_fourcc(self.FOURCC)\n self.producer = None\n self.nodrop = nodrop\n self.input_queue = queue.Queue(maxsize=2)\n self.verbose = verbose\n self.nodrop = nodrop\n self.stopped = False\n self.started = False\n self.times_encode = []\n self.pointcounts = []\n self.encoder_group = None\n self.encoders = []\n self.tiledescriptions = [{}]\n self.octree_bits = None\n self.jpeg_quality = None\n\n def set_encoder_params(self, tiles: Optional[List[cwipc.\n cwipc_tileinfo_pythonic]]=None, octree_bits: Optional[int]=None,\n jpeg_quality: Optional[int]=None) ->None:\n if tiles == None:\n tiles = [{}]\n self.tiledescriptions = tiles\n self.octree_bits = octree_bits\n self.jpeg_quality = jpeg_quality\n\n def start(self) ->None:\n self._init_encoders()\n threading.Thread.start(self)\n self.sink.start()\n self.started = True\n\n def stop(self) ->None:\n if self.verbose:\n print(f'encoder: stopping thread')\n self.stopped = True\n self.sink.stop()\n if self.started:\n self.join()\n\n def set_producer(self, producer: Any) ->None:\n self.producer = producer\n self.sink.set_producer(producer)\n\n def is_alive(self):\n return not self.stopped\n\n def run(self):\n assert self.encoder_group\n if self.verbose:\n print(f'encoder: thread started')\n try:\n while (not self.stopped and self.producer and self.producer.\n is_alive()):\n pc = self.input_queue.get()\n if not pc:\n print(f'encoder: get() returned None')\n continue\n self.pointcounts.append(pc.count())\n t1 = time.time()\n self.encoder_group.feed(pc)\n packets: List[bytearray] = []\n for i in range(len(self.encoders)):\n got_data = self.encoders[i].available(True)\n assert got_data\n cpc = self.encoders[i].get_bytes()\n packets.append(cpc)\n t2 = time.time()\n if len(packets) == 1:\n self.sink.feed(packets[0])\n else:\n for i in range(len(packets)):\n self.sink.feed(packets[i], stream_index=i)\n pc.free()\n self.times_encode.append(t2 - t1)\n finally:\n self.stopped = True\n if self.verbose:\n print(f'encoder: thread stopping')\n\n def feed(self, pc: cwipc.cwipc_wrapper) ->None:\n try:\n if self.nodrop:\n self.input_queue.put(pc)\n else:\n self.input_queue.put(pc, timeout=self.QUEUE_FULL_TIMEOUT)\n except queue.Full:\n if self.verbose:\n print(f'encoder: queue full, drop pointcloud')\n pc.free()\n\n def _init_encoders(self):\n if not self.octree_bits:\n self.octree_bits = 9\n if type(self.octree_bits) != type([]):\n self.octree_bits = [self.octree_bits]\n if not self.jpeg_quality:\n self.jpeg_quality = 85\n if type(self.jpeg_quality) != type([]):\n self.jpeg_quality = [self.jpeg_quality]\n voxelsize = 0\n if self.verbose:\n print(\n f'encoder: creating {len(self.tiledescriptions) * len(self.octree_bits) * len(self.jpeg_quality)} encoders/streams'\n )\n self.encoder_group = cwipc.codec.cwipc_new_encodergroup()\n for 
tile in range(len(self.tiledescriptions)):\n for octree_bits in self.octree_bits:\n for jpeg_quality in self.jpeg_quality:\n srctile = self.tiledescriptions[tile].get('ncamera', tile)\n encparams = cwipc.codec.cwipc_encoder_params(False, 1, \n 1.0, octree_bits, jpeg_quality, 16, srctile, voxelsize)\n encoder = self.encoder_group.addencoder(params=encparams)\n self.encoders.append(encoder)\n if hasattr(self.sink, 'add_streamDesc'):\n if not 'normal' in self.tiledescriptions[tile]:\n print(\n f'encoder: warning: tile {tile} description has no normal vector: {self.tiledescriptions[tile]}'\n )\n normal = self.tiledescriptions[tile].get('normal',\n dict(x=0, y=0, z=0))\n streamNum = self.sink.add_streamDesc(tile, normal[\n 'x'], normal['y'], normal['z'])\n if self.verbose:\n print(\n f'encoder: streamNum={streamNum}, tile={tile}, srctile={srctile}, normal={normal}, octree_bits={octree_bits}, jpeg_quality={jpeg_quality}'\n )\n else:\n streamNum = 0\n assert streamNum == len(self.encoders) - 1\n\n def statistics(self):\n self.print1stat('encode_duration', self.times_encode)\n self.print1stat('pointcount', self.pointcounts)\n if hasattr(self.sink, 'statistics'):\n self.sink.statistics()\n\n def print1stat(self, name, values, isInt=False):\n count = len(values)\n if count == 0:\n print('encoder: {}: count=0'.format(name))\n return\n minValue = min(values)\n maxValue = max(values)\n avgValue = sum(values) / count\n if isInt:\n fmtstring = (\n 'encoder: {}: count={}, average={:.3f}, min={:d}, max={:d}')\n else:\n fmtstring = (\n 'encoder: {}: count={}, average={:.3f}, min={:.3f}, max={:.3f}'\n )\n print(fmtstring.format(name, count, avgValue, minValue, maxValue))\n\n\ndef cwipc_sink_encoder(sink: cwipc_rawsink_abstract, verbose: bool=False,\n nodrop: bool=False) ->cwipc_sink_abstract:\n \"\"\"Create a cwipc_sink object that compresses pointclouds and forward them to a rawsink.\"\"\"\n if cwipc.codec == None:\n raise RuntimeError(\n 'cwipc_sink_encoder: requires cwipc.codec with is not available')\n return _Sink_Encoder(sink, verbose=verbose, nodrop=nodrop)\n",
"<import token>\n\n\nclass _Sink_Encoder(threading.Thread, cwipc_sink_abstract):\n \"\"\"A pointcloud sink that compresses pointclouds and forwards them to a rawsink.\"\"\"\n FOURCC = 'cwi1'\n SELECT_TIMEOUT = 0.1\n QUEUE_FULL_TIMEOUT = 0.001\n sink: cwipc_rawsink_abstract\n input_queue: queue.Queue[cwipc.cwipc_wrapper]\n pointcounts: List[int]\n tiledescriptions: List[cwipc.cwipc_tileinfo_pythonic]\n encoder_group: Optional[cwipc.codec.cwipc_encodergroup_wrapper]\n encoders: List[cwipc.codec.cwipc_encoder_wrapper]\n times_encode: List[float]\n\n def __init__(self, sink: cwipc_rawsink_abstract, verbose: bool=False,\n nodrop: bool=False):\n threading.Thread.__init__(self)\n self.name = 'cwipc_util._Sink_Encoder'\n self.sink = sink\n self.sink.set_fourcc(self.FOURCC)\n self.producer = None\n self.nodrop = nodrop\n self.input_queue = queue.Queue(maxsize=2)\n self.verbose = verbose\n self.nodrop = nodrop\n self.stopped = False\n self.started = False\n self.times_encode = []\n self.pointcounts = []\n self.encoder_group = None\n self.encoders = []\n self.tiledescriptions = [{}]\n self.octree_bits = None\n self.jpeg_quality = None\n\n def set_encoder_params(self, tiles: Optional[List[cwipc.\n cwipc_tileinfo_pythonic]]=None, octree_bits: Optional[int]=None,\n jpeg_quality: Optional[int]=None) ->None:\n if tiles == None:\n tiles = [{}]\n self.tiledescriptions = tiles\n self.octree_bits = octree_bits\n self.jpeg_quality = jpeg_quality\n\n def start(self) ->None:\n self._init_encoders()\n threading.Thread.start(self)\n self.sink.start()\n self.started = True\n\n def stop(self) ->None:\n if self.verbose:\n print(f'encoder: stopping thread')\n self.stopped = True\n self.sink.stop()\n if self.started:\n self.join()\n\n def set_producer(self, producer: Any) ->None:\n self.producer = producer\n self.sink.set_producer(producer)\n\n def is_alive(self):\n return not self.stopped\n\n def run(self):\n assert self.encoder_group\n if self.verbose:\n print(f'encoder: thread started')\n try:\n while (not self.stopped and self.producer and self.producer.\n is_alive()):\n pc = self.input_queue.get()\n if not pc:\n print(f'encoder: get() returned None')\n continue\n self.pointcounts.append(pc.count())\n t1 = time.time()\n self.encoder_group.feed(pc)\n packets: List[bytearray] = []\n for i in range(len(self.encoders)):\n got_data = self.encoders[i].available(True)\n assert got_data\n cpc = self.encoders[i].get_bytes()\n packets.append(cpc)\n t2 = time.time()\n if len(packets) == 1:\n self.sink.feed(packets[0])\n else:\n for i in range(len(packets)):\n self.sink.feed(packets[i], stream_index=i)\n pc.free()\n self.times_encode.append(t2 - t1)\n finally:\n self.stopped = True\n if self.verbose:\n print(f'encoder: thread stopping')\n\n def feed(self, pc: cwipc.cwipc_wrapper) ->None:\n try:\n if self.nodrop:\n self.input_queue.put(pc)\n else:\n self.input_queue.put(pc, timeout=self.QUEUE_FULL_TIMEOUT)\n except queue.Full:\n if self.verbose:\n print(f'encoder: queue full, drop pointcloud')\n pc.free()\n\n def _init_encoders(self):\n if not self.octree_bits:\n self.octree_bits = 9\n if type(self.octree_bits) != type([]):\n self.octree_bits = [self.octree_bits]\n if not self.jpeg_quality:\n self.jpeg_quality = 85\n if type(self.jpeg_quality) != type([]):\n self.jpeg_quality = [self.jpeg_quality]\n voxelsize = 0\n if self.verbose:\n print(\n f'encoder: creating {len(self.tiledescriptions) * len(self.octree_bits) * len(self.jpeg_quality)} encoders/streams'\n )\n self.encoder_group = cwipc.codec.cwipc_new_encodergroup()\n for 
tile in range(len(self.tiledescriptions)):\n for octree_bits in self.octree_bits:\n for jpeg_quality in self.jpeg_quality:\n srctile = self.tiledescriptions[tile].get('ncamera', tile)\n encparams = cwipc.codec.cwipc_encoder_params(False, 1, \n 1.0, octree_bits, jpeg_quality, 16, srctile, voxelsize)\n encoder = self.encoder_group.addencoder(params=encparams)\n self.encoders.append(encoder)\n if hasattr(self.sink, 'add_streamDesc'):\n if not 'normal' in self.tiledescriptions[tile]:\n print(\n f'encoder: warning: tile {tile} description has no normal vector: {self.tiledescriptions[tile]}'\n )\n normal = self.tiledescriptions[tile].get('normal',\n dict(x=0, y=0, z=0))\n streamNum = self.sink.add_streamDesc(tile, normal[\n 'x'], normal['y'], normal['z'])\n if self.verbose:\n print(\n f'encoder: streamNum={streamNum}, tile={tile}, srctile={srctile}, normal={normal}, octree_bits={octree_bits}, jpeg_quality={jpeg_quality}'\n )\n else:\n streamNum = 0\n assert streamNum == len(self.encoders) - 1\n\n def statistics(self):\n self.print1stat('encode_duration', self.times_encode)\n self.print1stat('pointcount', self.pointcounts)\n if hasattr(self.sink, 'statistics'):\n self.sink.statistics()\n\n def print1stat(self, name, values, isInt=False):\n count = len(values)\n if count == 0:\n print('encoder: {}: count=0'.format(name))\n return\n minValue = min(values)\n maxValue = max(values)\n avgValue = sum(values) / count\n if isInt:\n fmtstring = (\n 'encoder: {}: count={}, average={:.3f}, min={:d}, max={:d}')\n else:\n fmtstring = (\n 'encoder: {}: count={}, average={:.3f}, min={:.3f}, max={:.3f}'\n )\n print(fmtstring.format(name, count, avgValue, minValue, maxValue))\n\n\n<function token>\n",
"<import token>\n\n\nclass _Sink_Encoder(threading.Thread, cwipc_sink_abstract):\n <docstring token>\n FOURCC = 'cwi1'\n SELECT_TIMEOUT = 0.1\n QUEUE_FULL_TIMEOUT = 0.001\n sink: cwipc_rawsink_abstract\n input_queue: queue.Queue[cwipc.cwipc_wrapper]\n pointcounts: List[int]\n tiledescriptions: List[cwipc.cwipc_tileinfo_pythonic]\n encoder_group: Optional[cwipc.codec.cwipc_encodergroup_wrapper]\n encoders: List[cwipc.codec.cwipc_encoder_wrapper]\n times_encode: List[float]\n\n def __init__(self, sink: cwipc_rawsink_abstract, verbose: bool=False,\n nodrop: bool=False):\n threading.Thread.__init__(self)\n self.name = 'cwipc_util._Sink_Encoder'\n self.sink = sink\n self.sink.set_fourcc(self.FOURCC)\n self.producer = None\n self.nodrop = nodrop\n self.input_queue = queue.Queue(maxsize=2)\n self.verbose = verbose\n self.nodrop = nodrop\n self.stopped = False\n self.started = False\n self.times_encode = []\n self.pointcounts = []\n self.encoder_group = None\n self.encoders = []\n self.tiledescriptions = [{}]\n self.octree_bits = None\n self.jpeg_quality = None\n\n def set_encoder_params(self, tiles: Optional[List[cwipc.\n cwipc_tileinfo_pythonic]]=None, octree_bits: Optional[int]=None,\n jpeg_quality: Optional[int]=None) ->None:\n if tiles == None:\n tiles = [{}]\n self.tiledescriptions = tiles\n self.octree_bits = octree_bits\n self.jpeg_quality = jpeg_quality\n\n def start(self) ->None:\n self._init_encoders()\n threading.Thread.start(self)\n self.sink.start()\n self.started = True\n\n def stop(self) ->None:\n if self.verbose:\n print(f'encoder: stopping thread')\n self.stopped = True\n self.sink.stop()\n if self.started:\n self.join()\n\n def set_producer(self, producer: Any) ->None:\n self.producer = producer\n self.sink.set_producer(producer)\n\n def is_alive(self):\n return not self.stopped\n\n def run(self):\n assert self.encoder_group\n if self.verbose:\n print(f'encoder: thread started')\n try:\n while (not self.stopped and self.producer and self.producer.\n is_alive()):\n pc = self.input_queue.get()\n if not pc:\n print(f'encoder: get() returned None')\n continue\n self.pointcounts.append(pc.count())\n t1 = time.time()\n self.encoder_group.feed(pc)\n packets: List[bytearray] = []\n for i in range(len(self.encoders)):\n got_data = self.encoders[i].available(True)\n assert got_data\n cpc = self.encoders[i].get_bytes()\n packets.append(cpc)\n t2 = time.time()\n if len(packets) == 1:\n self.sink.feed(packets[0])\n else:\n for i in range(len(packets)):\n self.sink.feed(packets[i], stream_index=i)\n pc.free()\n self.times_encode.append(t2 - t1)\n finally:\n self.stopped = True\n if self.verbose:\n print(f'encoder: thread stopping')\n\n def feed(self, pc: cwipc.cwipc_wrapper) ->None:\n try:\n if self.nodrop:\n self.input_queue.put(pc)\n else:\n self.input_queue.put(pc, timeout=self.QUEUE_FULL_TIMEOUT)\n except queue.Full:\n if self.verbose:\n print(f'encoder: queue full, drop pointcloud')\n pc.free()\n\n def _init_encoders(self):\n if not self.octree_bits:\n self.octree_bits = 9\n if type(self.octree_bits) != type([]):\n self.octree_bits = [self.octree_bits]\n if not self.jpeg_quality:\n self.jpeg_quality = 85\n if type(self.jpeg_quality) != type([]):\n self.jpeg_quality = [self.jpeg_quality]\n voxelsize = 0\n if self.verbose:\n print(\n f'encoder: creating {len(self.tiledescriptions) * len(self.octree_bits) * len(self.jpeg_quality)} encoders/streams'\n )\n self.encoder_group = cwipc.codec.cwipc_new_encodergroup()\n for tile in range(len(self.tiledescriptions)):\n for octree_bits in 
self.octree_bits:\n for jpeg_quality in self.jpeg_quality:\n srctile = self.tiledescriptions[tile].get('ncamera', tile)\n encparams = cwipc.codec.cwipc_encoder_params(False, 1, \n 1.0, octree_bits, jpeg_quality, 16, srctile, voxelsize)\n encoder = self.encoder_group.addencoder(params=encparams)\n self.encoders.append(encoder)\n if hasattr(self.sink, 'add_streamDesc'):\n if not 'normal' in self.tiledescriptions[tile]:\n print(\n f'encoder: warning: tile {tile} description has no normal vector: {self.tiledescriptions[tile]}'\n )\n normal = self.tiledescriptions[tile].get('normal',\n dict(x=0, y=0, z=0))\n streamNum = self.sink.add_streamDesc(tile, normal[\n 'x'], normal['y'], normal['z'])\n if self.verbose:\n print(\n f'encoder: streamNum={streamNum}, tile={tile}, srctile={srctile}, normal={normal}, octree_bits={octree_bits}, jpeg_quality={jpeg_quality}'\n )\n else:\n streamNum = 0\n assert streamNum == len(self.encoders) - 1\n\n def statistics(self):\n self.print1stat('encode_duration', self.times_encode)\n self.print1stat('pointcount', self.pointcounts)\n if hasattr(self.sink, 'statistics'):\n self.sink.statistics()\n\n def print1stat(self, name, values, isInt=False):\n count = len(values)\n if count == 0:\n print('encoder: {}: count=0'.format(name))\n return\n minValue = min(values)\n maxValue = max(values)\n avgValue = sum(values) / count\n if isInt:\n fmtstring = (\n 'encoder: {}: count={}, average={:.3f}, min={:d}, max={:d}')\n else:\n fmtstring = (\n 'encoder: {}: count={}, average={:.3f}, min={:.3f}, max={:.3f}'\n )\n print(fmtstring.format(name, count, avgValue, minValue, maxValue))\n\n\n<function token>\n",
"<import token>\n\n\nclass _Sink_Encoder(threading.Thread, cwipc_sink_abstract):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n sink: cwipc_rawsink_abstract\n input_queue: queue.Queue[cwipc.cwipc_wrapper]\n pointcounts: List[int]\n tiledescriptions: List[cwipc.cwipc_tileinfo_pythonic]\n encoder_group: Optional[cwipc.codec.cwipc_encodergroup_wrapper]\n encoders: List[cwipc.codec.cwipc_encoder_wrapper]\n times_encode: List[float]\n\n def __init__(self, sink: cwipc_rawsink_abstract, verbose: bool=False,\n nodrop: bool=False):\n threading.Thread.__init__(self)\n self.name = 'cwipc_util._Sink_Encoder'\n self.sink = sink\n self.sink.set_fourcc(self.FOURCC)\n self.producer = None\n self.nodrop = nodrop\n self.input_queue = queue.Queue(maxsize=2)\n self.verbose = verbose\n self.nodrop = nodrop\n self.stopped = False\n self.started = False\n self.times_encode = []\n self.pointcounts = []\n self.encoder_group = None\n self.encoders = []\n self.tiledescriptions = [{}]\n self.octree_bits = None\n self.jpeg_quality = None\n\n def set_encoder_params(self, tiles: Optional[List[cwipc.\n cwipc_tileinfo_pythonic]]=None, octree_bits: Optional[int]=None,\n jpeg_quality: Optional[int]=None) ->None:\n if tiles == None:\n tiles = [{}]\n self.tiledescriptions = tiles\n self.octree_bits = octree_bits\n self.jpeg_quality = jpeg_quality\n\n def start(self) ->None:\n self._init_encoders()\n threading.Thread.start(self)\n self.sink.start()\n self.started = True\n\n def stop(self) ->None:\n if self.verbose:\n print(f'encoder: stopping thread')\n self.stopped = True\n self.sink.stop()\n if self.started:\n self.join()\n\n def set_producer(self, producer: Any) ->None:\n self.producer = producer\n self.sink.set_producer(producer)\n\n def is_alive(self):\n return not self.stopped\n\n def run(self):\n assert self.encoder_group\n if self.verbose:\n print(f'encoder: thread started')\n try:\n while (not self.stopped and self.producer and self.producer.\n is_alive()):\n pc = self.input_queue.get()\n if not pc:\n print(f'encoder: get() returned None')\n continue\n self.pointcounts.append(pc.count())\n t1 = time.time()\n self.encoder_group.feed(pc)\n packets: List[bytearray] = []\n for i in range(len(self.encoders)):\n got_data = self.encoders[i].available(True)\n assert got_data\n cpc = self.encoders[i].get_bytes()\n packets.append(cpc)\n t2 = time.time()\n if len(packets) == 1:\n self.sink.feed(packets[0])\n else:\n for i in range(len(packets)):\n self.sink.feed(packets[i], stream_index=i)\n pc.free()\n self.times_encode.append(t2 - t1)\n finally:\n self.stopped = True\n if self.verbose:\n print(f'encoder: thread stopping')\n\n def feed(self, pc: cwipc.cwipc_wrapper) ->None:\n try:\n if self.nodrop:\n self.input_queue.put(pc)\n else:\n self.input_queue.put(pc, timeout=self.QUEUE_FULL_TIMEOUT)\n except queue.Full:\n if self.verbose:\n print(f'encoder: queue full, drop pointcloud')\n pc.free()\n\n def _init_encoders(self):\n if not self.octree_bits:\n self.octree_bits = 9\n if type(self.octree_bits) != type([]):\n self.octree_bits = [self.octree_bits]\n if not self.jpeg_quality:\n self.jpeg_quality = 85\n if type(self.jpeg_quality) != type([]):\n self.jpeg_quality = [self.jpeg_quality]\n voxelsize = 0\n if self.verbose:\n print(\n f'encoder: creating {len(self.tiledescriptions) * len(self.octree_bits) * len(self.jpeg_quality)} encoders/streams'\n )\n self.encoder_group = cwipc.codec.cwipc_new_encodergroup()\n for tile in range(len(self.tiledescriptions)):\n for octree_bits in 
self.octree_bits:\n for jpeg_quality in self.jpeg_quality:\n srctile = self.tiledescriptions[tile].get('ncamera', tile)\n encparams = cwipc.codec.cwipc_encoder_params(False, 1, \n 1.0, octree_bits, jpeg_quality, 16, srctile, voxelsize)\n encoder = self.encoder_group.addencoder(params=encparams)\n self.encoders.append(encoder)\n if hasattr(self.sink, 'add_streamDesc'):\n if not 'normal' in self.tiledescriptions[tile]:\n print(\n f'encoder: warning: tile {tile} description has no normal vector: {self.tiledescriptions[tile]}'\n )\n normal = self.tiledescriptions[tile].get('normal',\n dict(x=0, y=0, z=0))\n streamNum = self.sink.add_streamDesc(tile, normal[\n 'x'], normal['y'], normal['z'])\n if self.verbose:\n print(\n f'encoder: streamNum={streamNum}, tile={tile}, srctile={srctile}, normal={normal}, octree_bits={octree_bits}, jpeg_quality={jpeg_quality}'\n )\n else:\n streamNum = 0\n assert streamNum == len(self.encoders) - 1\n\n def statistics(self):\n self.print1stat('encode_duration', self.times_encode)\n self.print1stat('pointcount', self.pointcounts)\n if hasattr(self.sink, 'statistics'):\n self.sink.statistics()\n\n def print1stat(self, name, values, isInt=False):\n count = len(values)\n if count == 0:\n print('encoder: {}: count=0'.format(name))\n return\n minValue = min(values)\n maxValue = max(values)\n avgValue = sum(values) / count\n if isInt:\n fmtstring = (\n 'encoder: {}: count={}, average={:.3f}, min={:d}, max={:d}')\n else:\n fmtstring = (\n 'encoder: {}: count={}, average={:.3f}, min={:.3f}, max={:.3f}'\n )\n print(fmtstring.format(name, count, avgValue, minValue, maxValue))\n\n\n<function token>\n",
"<import token>\n\n\nclass _Sink_Encoder(threading.Thread, cwipc_sink_abstract):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n sink: cwipc_rawsink_abstract\n input_queue: queue.Queue[cwipc.cwipc_wrapper]\n pointcounts: List[int]\n tiledescriptions: List[cwipc.cwipc_tileinfo_pythonic]\n encoder_group: Optional[cwipc.codec.cwipc_encodergroup_wrapper]\n encoders: List[cwipc.codec.cwipc_encoder_wrapper]\n times_encode: List[float]\n\n def __init__(self, sink: cwipc_rawsink_abstract, verbose: bool=False,\n nodrop: bool=False):\n threading.Thread.__init__(self)\n self.name = 'cwipc_util._Sink_Encoder'\n self.sink = sink\n self.sink.set_fourcc(self.FOURCC)\n self.producer = None\n self.nodrop = nodrop\n self.input_queue = queue.Queue(maxsize=2)\n self.verbose = verbose\n self.nodrop = nodrop\n self.stopped = False\n self.started = False\n self.times_encode = []\n self.pointcounts = []\n self.encoder_group = None\n self.encoders = []\n self.tiledescriptions = [{}]\n self.octree_bits = None\n self.jpeg_quality = None\n <function token>\n\n def start(self) ->None:\n self._init_encoders()\n threading.Thread.start(self)\n self.sink.start()\n self.started = True\n\n def stop(self) ->None:\n if self.verbose:\n print(f'encoder: stopping thread')\n self.stopped = True\n self.sink.stop()\n if self.started:\n self.join()\n\n def set_producer(self, producer: Any) ->None:\n self.producer = producer\n self.sink.set_producer(producer)\n\n def is_alive(self):\n return not self.stopped\n\n def run(self):\n assert self.encoder_group\n if self.verbose:\n print(f'encoder: thread started')\n try:\n while (not self.stopped and self.producer and self.producer.\n is_alive()):\n pc = self.input_queue.get()\n if not pc:\n print(f'encoder: get() returned None')\n continue\n self.pointcounts.append(pc.count())\n t1 = time.time()\n self.encoder_group.feed(pc)\n packets: List[bytearray] = []\n for i in range(len(self.encoders)):\n got_data = self.encoders[i].available(True)\n assert got_data\n cpc = self.encoders[i].get_bytes()\n packets.append(cpc)\n t2 = time.time()\n if len(packets) == 1:\n self.sink.feed(packets[0])\n else:\n for i in range(len(packets)):\n self.sink.feed(packets[i], stream_index=i)\n pc.free()\n self.times_encode.append(t2 - t1)\n finally:\n self.stopped = True\n if self.verbose:\n print(f'encoder: thread stopping')\n\n def feed(self, pc: cwipc.cwipc_wrapper) ->None:\n try:\n if self.nodrop:\n self.input_queue.put(pc)\n else:\n self.input_queue.put(pc, timeout=self.QUEUE_FULL_TIMEOUT)\n except queue.Full:\n if self.verbose:\n print(f'encoder: queue full, drop pointcloud')\n pc.free()\n\n def _init_encoders(self):\n if not self.octree_bits:\n self.octree_bits = 9\n if type(self.octree_bits) != type([]):\n self.octree_bits = [self.octree_bits]\n if not self.jpeg_quality:\n self.jpeg_quality = 85\n if type(self.jpeg_quality) != type([]):\n self.jpeg_quality = [self.jpeg_quality]\n voxelsize = 0\n if self.verbose:\n print(\n f'encoder: creating {len(self.tiledescriptions) * len(self.octree_bits) * len(self.jpeg_quality)} encoders/streams'\n )\n self.encoder_group = cwipc.codec.cwipc_new_encodergroup()\n for tile in range(len(self.tiledescriptions)):\n for octree_bits in self.octree_bits:\n for jpeg_quality in self.jpeg_quality:\n srctile = self.tiledescriptions[tile].get('ncamera', tile)\n encparams = cwipc.codec.cwipc_encoder_params(False, 1, \n 1.0, octree_bits, jpeg_quality, 16, srctile, voxelsize)\n encoder = self.encoder_group.addencoder(params=encparams)\n 
self.encoders.append(encoder)\n if hasattr(self.sink, 'add_streamDesc'):\n if not 'normal' in self.tiledescriptions[tile]:\n print(\n f'encoder: warning: tile {tile} description has no normal vector: {self.tiledescriptions[tile]}'\n )\n normal = self.tiledescriptions[tile].get('normal',\n dict(x=0, y=0, z=0))\n streamNum = self.sink.add_streamDesc(tile, normal[\n 'x'], normal['y'], normal['z'])\n if self.verbose:\n print(\n f'encoder: streamNum={streamNum}, tile={tile}, srctile={srctile}, normal={normal}, octree_bits={octree_bits}, jpeg_quality={jpeg_quality}'\n )\n else:\n streamNum = 0\n assert streamNum == len(self.encoders) - 1\n\n def statistics(self):\n self.print1stat('encode_duration', self.times_encode)\n self.print1stat('pointcount', self.pointcounts)\n if hasattr(self.sink, 'statistics'):\n self.sink.statistics()\n\n def print1stat(self, name, values, isInt=False):\n count = len(values)\n if count == 0:\n print('encoder: {}: count=0'.format(name))\n return\n minValue = min(values)\n maxValue = max(values)\n avgValue = sum(values) / count\n if isInt:\n fmtstring = (\n 'encoder: {}: count={}, average={:.3f}, min={:d}, max={:d}')\n else:\n fmtstring = (\n 'encoder: {}: count={}, average={:.3f}, min={:.3f}, max={:.3f}'\n )\n print(fmtstring.format(name, count, avgValue, minValue, maxValue))\n\n\n<function token>\n",
"<import token>\n\n\nclass _Sink_Encoder(threading.Thread, cwipc_sink_abstract):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n sink: cwipc_rawsink_abstract\n input_queue: queue.Queue[cwipc.cwipc_wrapper]\n pointcounts: List[int]\n tiledescriptions: List[cwipc.cwipc_tileinfo_pythonic]\n encoder_group: Optional[cwipc.codec.cwipc_encodergroup_wrapper]\n encoders: List[cwipc.codec.cwipc_encoder_wrapper]\n times_encode: List[float]\n\n def __init__(self, sink: cwipc_rawsink_abstract, verbose: bool=False,\n nodrop: bool=False):\n threading.Thread.__init__(self)\n self.name = 'cwipc_util._Sink_Encoder'\n self.sink = sink\n self.sink.set_fourcc(self.FOURCC)\n self.producer = None\n self.nodrop = nodrop\n self.input_queue = queue.Queue(maxsize=2)\n self.verbose = verbose\n self.nodrop = nodrop\n self.stopped = False\n self.started = False\n self.times_encode = []\n self.pointcounts = []\n self.encoder_group = None\n self.encoders = []\n self.tiledescriptions = [{}]\n self.octree_bits = None\n self.jpeg_quality = None\n <function token>\n\n def start(self) ->None:\n self._init_encoders()\n threading.Thread.start(self)\n self.sink.start()\n self.started = True\n\n def stop(self) ->None:\n if self.verbose:\n print(f'encoder: stopping thread')\n self.stopped = True\n self.sink.stop()\n if self.started:\n self.join()\n <function token>\n\n def is_alive(self):\n return not self.stopped\n\n def run(self):\n assert self.encoder_group\n if self.verbose:\n print(f'encoder: thread started')\n try:\n while (not self.stopped and self.producer and self.producer.\n is_alive()):\n pc = self.input_queue.get()\n if not pc:\n print(f'encoder: get() returned None')\n continue\n self.pointcounts.append(pc.count())\n t1 = time.time()\n self.encoder_group.feed(pc)\n packets: List[bytearray] = []\n for i in range(len(self.encoders)):\n got_data = self.encoders[i].available(True)\n assert got_data\n cpc = self.encoders[i].get_bytes()\n packets.append(cpc)\n t2 = time.time()\n if len(packets) == 1:\n self.sink.feed(packets[0])\n else:\n for i in range(len(packets)):\n self.sink.feed(packets[i], stream_index=i)\n pc.free()\n self.times_encode.append(t2 - t1)\n finally:\n self.stopped = True\n if self.verbose:\n print(f'encoder: thread stopping')\n\n def feed(self, pc: cwipc.cwipc_wrapper) ->None:\n try:\n if self.nodrop:\n self.input_queue.put(pc)\n else:\n self.input_queue.put(pc, timeout=self.QUEUE_FULL_TIMEOUT)\n except queue.Full:\n if self.verbose:\n print(f'encoder: queue full, drop pointcloud')\n pc.free()\n\n def _init_encoders(self):\n if not self.octree_bits:\n self.octree_bits = 9\n if type(self.octree_bits) != type([]):\n self.octree_bits = [self.octree_bits]\n if not self.jpeg_quality:\n self.jpeg_quality = 85\n if type(self.jpeg_quality) != type([]):\n self.jpeg_quality = [self.jpeg_quality]\n voxelsize = 0\n if self.verbose:\n print(\n f'encoder: creating {len(self.tiledescriptions) * len(self.octree_bits) * len(self.jpeg_quality)} encoders/streams'\n )\n self.encoder_group = cwipc.codec.cwipc_new_encodergroup()\n for tile in range(len(self.tiledescriptions)):\n for octree_bits in self.octree_bits:\n for jpeg_quality in self.jpeg_quality:\n srctile = self.tiledescriptions[tile].get('ncamera', tile)\n encparams = cwipc.codec.cwipc_encoder_params(False, 1, \n 1.0, octree_bits, jpeg_quality, 16, srctile, voxelsize)\n encoder = self.encoder_group.addencoder(params=encparams)\n self.encoders.append(encoder)\n if hasattr(self.sink, 'add_streamDesc'):\n if not 'normal' in 
self.tiledescriptions[tile]:\n print(\n f'encoder: warning: tile {tile} description has no normal vector: {self.tiledescriptions[tile]}'\n )\n normal = self.tiledescriptions[tile].get('normal',\n dict(x=0, y=0, z=0))\n streamNum = self.sink.add_streamDesc(tile, normal[\n 'x'], normal['y'], normal['z'])\n if self.verbose:\n print(\n f'encoder: streamNum={streamNum}, tile={tile}, srctile={srctile}, normal={normal}, octree_bits={octree_bits}, jpeg_quality={jpeg_quality}'\n )\n else:\n streamNum = 0\n assert streamNum == len(self.encoders) - 1\n\n def statistics(self):\n self.print1stat('encode_duration', self.times_encode)\n self.print1stat('pointcount', self.pointcounts)\n if hasattr(self.sink, 'statistics'):\n self.sink.statistics()\n\n def print1stat(self, name, values, isInt=False):\n count = len(values)\n if count == 0:\n print('encoder: {}: count=0'.format(name))\n return\n minValue = min(values)\n maxValue = max(values)\n avgValue = sum(values) / count\n if isInt:\n fmtstring = (\n 'encoder: {}: count={}, average={:.3f}, min={:d}, max={:d}')\n else:\n fmtstring = (\n 'encoder: {}: count={}, average={:.3f}, min={:.3f}, max={:.3f}'\n )\n print(fmtstring.format(name, count, avgValue, minValue, maxValue))\n\n\n<function token>\n",
"<import token>\n\n\nclass _Sink_Encoder(threading.Thread, cwipc_sink_abstract):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n sink: cwipc_rawsink_abstract\n input_queue: queue.Queue[cwipc.cwipc_wrapper]\n pointcounts: List[int]\n tiledescriptions: List[cwipc.cwipc_tileinfo_pythonic]\n encoder_group: Optional[cwipc.codec.cwipc_encodergroup_wrapper]\n encoders: List[cwipc.codec.cwipc_encoder_wrapper]\n times_encode: List[float]\n\n def __init__(self, sink: cwipc_rawsink_abstract, verbose: bool=False,\n nodrop: bool=False):\n threading.Thread.__init__(self)\n self.name = 'cwipc_util._Sink_Encoder'\n self.sink = sink\n self.sink.set_fourcc(self.FOURCC)\n self.producer = None\n self.nodrop = nodrop\n self.input_queue = queue.Queue(maxsize=2)\n self.verbose = verbose\n self.nodrop = nodrop\n self.stopped = False\n self.started = False\n self.times_encode = []\n self.pointcounts = []\n self.encoder_group = None\n self.encoders = []\n self.tiledescriptions = [{}]\n self.octree_bits = None\n self.jpeg_quality = None\n <function token>\n\n def start(self) ->None:\n self._init_encoders()\n threading.Thread.start(self)\n self.sink.start()\n self.started = True\n\n def stop(self) ->None:\n if self.verbose:\n print(f'encoder: stopping thread')\n self.stopped = True\n self.sink.stop()\n if self.started:\n self.join()\n <function token>\n\n def is_alive(self):\n return not self.stopped\n\n def run(self):\n assert self.encoder_group\n if self.verbose:\n print(f'encoder: thread started')\n try:\n while (not self.stopped and self.producer and self.producer.\n is_alive()):\n pc = self.input_queue.get()\n if not pc:\n print(f'encoder: get() returned None')\n continue\n self.pointcounts.append(pc.count())\n t1 = time.time()\n self.encoder_group.feed(pc)\n packets: List[bytearray] = []\n for i in range(len(self.encoders)):\n got_data = self.encoders[i].available(True)\n assert got_data\n cpc = self.encoders[i].get_bytes()\n packets.append(cpc)\n t2 = time.time()\n if len(packets) == 1:\n self.sink.feed(packets[0])\n else:\n for i in range(len(packets)):\n self.sink.feed(packets[i], stream_index=i)\n pc.free()\n self.times_encode.append(t2 - t1)\n finally:\n self.stopped = True\n if self.verbose:\n print(f'encoder: thread stopping')\n\n def feed(self, pc: cwipc.cwipc_wrapper) ->None:\n try:\n if self.nodrop:\n self.input_queue.put(pc)\n else:\n self.input_queue.put(pc, timeout=self.QUEUE_FULL_TIMEOUT)\n except queue.Full:\n if self.verbose:\n print(f'encoder: queue full, drop pointcloud')\n pc.free()\n\n def _init_encoders(self):\n if not self.octree_bits:\n self.octree_bits = 9\n if type(self.octree_bits) != type([]):\n self.octree_bits = [self.octree_bits]\n if not self.jpeg_quality:\n self.jpeg_quality = 85\n if type(self.jpeg_quality) != type([]):\n self.jpeg_quality = [self.jpeg_quality]\n voxelsize = 0\n if self.verbose:\n print(\n f'encoder: creating {len(self.tiledescriptions) * len(self.octree_bits) * len(self.jpeg_quality)} encoders/streams'\n )\n self.encoder_group = cwipc.codec.cwipc_new_encodergroup()\n for tile in range(len(self.tiledescriptions)):\n for octree_bits in self.octree_bits:\n for jpeg_quality in self.jpeg_quality:\n srctile = self.tiledescriptions[tile].get('ncamera', tile)\n encparams = cwipc.codec.cwipc_encoder_params(False, 1, \n 1.0, octree_bits, jpeg_quality, 16, srctile, voxelsize)\n encoder = self.encoder_group.addencoder(params=encparams)\n self.encoders.append(encoder)\n if hasattr(self.sink, 'add_streamDesc'):\n if not 'normal' in 
self.tiledescriptions[tile]:\n print(\n f'encoder: warning: tile {tile} description has no normal vector: {self.tiledescriptions[tile]}'\n )\n normal = self.tiledescriptions[tile].get('normal',\n dict(x=0, y=0, z=0))\n streamNum = self.sink.add_streamDesc(tile, normal[\n 'x'], normal['y'], normal['z'])\n if self.verbose:\n print(\n f'encoder: streamNum={streamNum}, tile={tile}, srctile={srctile}, normal={normal}, octree_bits={octree_bits}, jpeg_quality={jpeg_quality}'\n )\n else:\n streamNum = 0\n assert streamNum == len(self.encoders) - 1\n <function token>\n\n def print1stat(self, name, values, isInt=False):\n count = len(values)\n if count == 0:\n print('encoder: {}: count=0'.format(name))\n return\n minValue = min(values)\n maxValue = max(values)\n avgValue = sum(values) / count\n if isInt:\n fmtstring = (\n 'encoder: {}: count={}, average={:.3f}, min={:d}, max={:d}')\n else:\n fmtstring = (\n 'encoder: {}: count={}, average={:.3f}, min={:.3f}, max={:.3f}'\n )\n print(fmtstring.format(name, count, avgValue, minValue, maxValue))\n\n\n<function token>\n",
"<import token>\n\n\nclass _Sink_Encoder(threading.Thread, cwipc_sink_abstract):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n sink: cwipc_rawsink_abstract\n input_queue: queue.Queue[cwipc.cwipc_wrapper]\n pointcounts: List[int]\n tiledescriptions: List[cwipc.cwipc_tileinfo_pythonic]\n encoder_group: Optional[cwipc.codec.cwipc_encodergroup_wrapper]\n encoders: List[cwipc.codec.cwipc_encoder_wrapper]\n times_encode: List[float]\n\n def __init__(self, sink: cwipc_rawsink_abstract, verbose: bool=False,\n nodrop: bool=False):\n threading.Thread.__init__(self)\n self.name = 'cwipc_util._Sink_Encoder'\n self.sink = sink\n self.sink.set_fourcc(self.FOURCC)\n self.producer = None\n self.nodrop = nodrop\n self.input_queue = queue.Queue(maxsize=2)\n self.verbose = verbose\n self.nodrop = nodrop\n self.stopped = False\n self.started = False\n self.times_encode = []\n self.pointcounts = []\n self.encoder_group = None\n self.encoders = []\n self.tiledescriptions = [{}]\n self.octree_bits = None\n self.jpeg_quality = None\n <function token>\n\n def start(self) ->None:\n self._init_encoders()\n threading.Thread.start(self)\n self.sink.start()\n self.started = True\n\n def stop(self) ->None:\n if self.verbose:\n print(f'encoder: stopping thread')\n self.stopped = True\n self.sink.stop()\n if self.started:\n self.join()\n <function token>\n <function token>\n\n def run(self):\n assert self.encoder_group\n if self.verbose:\n print(f'encoder: thread started')\n try:\n while (not self.stopped and self.producer and self.producer.\n is_alive()):\n pc = self.input_queue.get()\n if not pc:\n print(f'encoder: get() returned None')\n continue\n self.pointcounts.append(pc.count())\n t1 = time.time()\n self.encoder_group.feed(pc)\n packets: List[bytearray] = []\n for i in range(len(self.encoders)):\n got_data = self.encoders[i].available(True)\n assert got_data\n cpc = self.encoders[i].get_bytes()\n packets.append(cpc)\n t2 = time.time()\n if len(packets) == 1:\n self.sink.feed(packets[0])\n else:\n for i in range(len(packets)):\n self.sink.feed(packets[i], stream_index=i)\n pc.free()\n self.times_encode.append(t2 - t1)\n finally:\n self.stopped = True\n if self.verbose:\n print(f'encoder: thread stopping')\n\n def feed(self, pc: cwipc.cwipc_wrapper) ->None:\n try:\n if self.nodrop:\n self.input_queue.put(pc)\n else:\n self.input_queue.put(pc, timeout=self.QUEUE_FULL_TIMEOUT)\n except queue.Full:\n if self.verbose:\n print(f'encoder: queue full, drop pointcloud')\n pc.free()\n\n def _init_encoders(self):\n if not self.octree_bits:\n self.octree_bits = 9\n if type(self.octree_bits) != type([]):\n self.octree_bits = [self.octree_bits]\n if not self.jpeg_quality:\n self.jpeg_quality = 85\n if type(self.jpeg_quality) != type([]):\n self.jpeg_quality = [self.jpeg_quality]\n voxelsize = 0\n if self.verbose:\n print(\n f'encoder: creating {len(self.tiledescriptions) * len(self.octree_bits) * len(self.jpeg_quality)} encoders/streams'\n )\n self.encoder_group = cwipc.codec.cwipc_new_encodergroup()\n for tile in range(len(self.tiledescriptions)):\n for octree_bits in self.octree_bits:\n for jpeg_quality in self.jpeg_quality:\n srctile = self.tiledescriptions[tile].get('ncamera', tile)\n encparams = cwipc.codec.cwipc_encoder_params(False, 1, \n 1.0, octree_bits, jpeg_quality, 16, srctile, voxelsize)\n encoder = self.encoder_group.addencoder(params=encparams)\n self.encoders.append(encoder)\n if hasattr(self.sink, 'add_streamDesc'):\n if not 'normal' in self.tiledescriptions[tile]:\n 
print(\n f'encoder: warning: tile {tile} description has no normal vector: {self.tiledescriptions[tile]}'\n )\n normal = self.tiledescriptions[tile].get('normal',\n dict(x=0, y=0, z=0))\n streamNum = self.sink.add_streamDesc(tile, normal[\n 'x'], normal['y'], normal['z'])\n if self.verbose:\n print(\n f'encoder: streamNum={streamNum}, tile={tile}, srctile={srctile}, normal={normal}, octree_bits={octree_bits}, jpeg_quality={jpeg_quality}'\n )\n else:\n streamNum = 0\n assert streamNum == len(self.encoders) - 1\n <function token>\n\n def print1stat(self, name, values, isInt=False):\n count = len(values)\n if count == 0:\n print('encoder: {}: count=0'.format(name))\n return\n minValue = min(values)\n maxValue = max(values)\n avgValue = sum(values) / count\n if isInt:\n fmtstring = (\n 'encoder: {}: count={}, average={:.3f}, min={:d}, max={:d}')\n else:\n fmtstring = (\n 'encoder: {}: count={}, average={:.3f}, min={:.3f}, max={:.3f}'\n )\n print(fmtstring.format(name, count, avgValue, minValue, maxValue))\n\n\n<function token>\n",
"<import token>\n\n\nclass _Sink_Encoder(threading.Thread, cwipc_sink_abstract):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n sink: cwipc_rawsink_abstract\n input_queue: queue.Queue[cwipc.cwipc_wrapper]\n pointcounts: List[int]\n tiledescriptions: List[cwipc.cwipc_tileinfo_pythonic]\n encoder_group: Optional[cwipc.codec.cwipc_encodergroup_wrapper]\n encoders: List[cwipc.codec.cwipc_encoder_wrapper]\n times_encode: List[float]\n <function token>\n <function token>\n\n def start(self) ->None:\n self._init_encoders()\n threading.Thread.start(self)\n self.sink.start()\n self.started = True\n\n def stop(self) ->None:\n if self.verbose:\n print(f'encoder: stopping thread')\n self.stopped = True\n self.sink.stop()\n if self.started:\n self.join()\n <function token>\n <function token>\n\n def run(self):\n assert self.encoder_group\n if self.verbose:\n print(f'encoder: thread started')\n try:\n while (not self.stopped and self.producer and self.producer.\n is_alive()):\n pc = self.input_queue.get()\n if not pc:\n print(f'encoder: get() returned None')\n continue\n self.pointcounts.append(pc.count())\n t1 = time.time()\n self.encoder_group.feed(pc)\n packets: List[bytearray] = []\n for i in range(len(self.encoders)):\n got_data = self.encoders[i].available(True)\n assert got_data\n cpc = self.encoders[i].get_bytes()\n packets.append(cpc)\n t2 = time.time()\n if len(packets) == 1:\n self.sink.feed(packets[0])\n else:\n for i in range(len(packets)):\n self.sink.feed(packets[i], stream_index=i)\n pc.free()\n self.times_encode.append(t2 - t1)\n finally:\n self.stopped = True\n if self.verbose:\n print(f'encoder: thread stopping')\n\n def feed(self, pc: cwipc.cwipc_wrapper) ->None:\n try:\n if self.nodrop:\n self.input_queue.put(pc)\n else:\n self.input_queue.put(pc, timeout=self.QUEUE_FULL_TIMEOUT)\n except queue.Full:\n if self.verbose:\n print(f'encoder: queue full, drop pointcloud')\n pc.free()\n\n def _init_encoders(self):\n if not self.octree_bits:\n self.octree_bits = 9\n if type(self.octree_bits) != type([]):\n self.octree_bits = [self.octree_bits]\n if not self.jpeg_quality:\n self.jpeg_quality = 85\n if type(self.jpeg_quality) != type([]):\n self.jpeg_quality = [self.jpeg_quality]\n voxelsize = 0\n if self.verbose:\n print(\n f'encoder: creating {len(self.tiledescriptions) * len(self.octree_bits) * len(self.jpeg_quality)} encoders/streams'\n )\n self.encoder_group = cwipc.codec.cwipc_new_encodergroup()\n for tile in range(len(self.tiledescriptions)):\n for octree_bits in self.octree_bits:\n for jpeg_quality in self.jpeg_quality:\n srctile = self.tiledescriptions[tile].get('ncamera', tile)\n encparams = cwipc.codec.cwipc_encoder_params(False, 1, \n 1.0, octree_bits, jpeg_quality, 16, srctile, voxelsize)\n encoder = self.encoder_group.addencoder(params=encparams)\n self.encoders.append(encoder)\n if hasattr(self.sink, 'add_streamDesc'):\n if not 'normal' in self.tiledescriptions[tile]:\n print(\n f'encoder: warning: tile {tile} description has no normal vector: {self.tiledescriptions[tile]}'\n )\n normal = self.tiledescriptions[tile].get('normal',\n dict(x=0, y=0, z=0))\n streamNum = self.sink.add_streamDesc(tile, normal[\n 'x'], normal['y'], normal['z'])\n if self.verbose:\n print(\n f'encoder: streamNum={streamNum}, tile={tile}, srctile={srctile}, normal={normal}, octree_bits={octree_bits}, jpeg_quality={jpeg_quality}'\n )\n else:\n streamNum = 0\n assert streamNum == len(self.encoders) - 1\n <function token>\n\n def print1stat(self, name, values, 
isInt=False):\n count = len(values)\n if count == 0:\n print('encoder: {}: count=0'.format(name))\n return\n minValue = min(values)\n maxValue = max(values)\n avgValue = sum(values) / count\n if isInt:\n fmtstring = (\n 'encoder: {}: count={}, average={:.3f}, min={:d}, max={:d}')\n else:\n fmtstring = (\n 'encoder: {}: count={}, average={:.3f}, min={:.3f}, max={:.3f}'\n )\n print(fmtstring.format(name, count, avgValue, minValue, maxValue))\n\n\n<function token>\n",
"<import token>\n\n\nclass _Sink_Encoder(threading.Thread, cwipc_sink_abstract):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n sink: cwipc_rawsink_abstract\n input_queue: queue.Queue[cwipc.cwipc_wrapper]\n pointcounts: List[int]\n tiledescriptions: List[cwipc.cwipc_tileinfo_pythonic]\n encoder_group: Optional[cwipc.codec.cwipc_encodergroup_wrapper]\n encoders: List[cwipc.codec.cwipc_encoder_wrapper]\n times_encode: List[float]\n <function token>\n <function token>\n\n def start(self) ->None:\n self._init_encoders()\n threading.Thread.start(self)\n self.sink.start()\n self.started = True\n\n def stop(self) ->None:\n if self.verbose:\n print(f'encoder: stopping thread')\n self.stopped = True\n self.sink.stop()\n if self.started:\n self.join()\n <function token>\n <function token>\n\n def run(self):\n assert self.encoder_group\n if self.verbose:\n print(f'encoder: thread started')\n try:\n while (not self.stopped and self.producer and self.producer.\n is_alive()):\n pc = self.input_queue.get()\n if not pc:\n print(f'encoder: get() returned None')\n continue\n self.pointcounts.append(pc.count())\n t1 = time.time()\n self.encoder_group.feed(pc)\n packets: List[bytearray] = []\n for i in range(len(self.encoders)):\n got_data = self.encoders[i].available(True)\n assert got_data\n cpc = self.encoders[i].get_bytes()\n packets.append(cpc)\n t2 = time.time()\n if len(packets) == 1:\n self.sink.feed(packets[0])\n else:\n for i in range(len(packets)):\n self.sink.feed(packets[i], stream_index=i)\n pc.free()\n self.times_encode.append(t2 - t1)\n finally:\n self.stopped = True\n if self.verbose:\n print(f'encoder: thread stopping')\n <function token>\n\n def _init_encoders(self):\n if not self.octree_bits:\n self.octree_bits = 9\n if type(self.octree_bits) != type([]):\n self.octree_bits = [self.octree_bits]\n if not self.jpeg_quality:\n self.jpeg_quality = 85\n if type(self.jpeg_quality) != type([]):\n self.jpeg_quality = [self.jpeg_quality]\n voxelsize = 0\n if self.verbose:\n print(\n f'encoder: creating {len(self.tiledescriptions) * len(self.octree_bits) * len(self.jpeg_quality)} encoders/streams'\n )\n self.encoder_group = cwipc.codec.cwipc_new_encodergroup()\n for tile in range(len(self.tiledescriptions)):\n for octree_bits in self.octree_bits:\n for jpeg_quality in self.jpeg_quality:\n srctile = self.tiledescriptions[tile].get('ncamera', tile)\n encparams = cwipc.codec.cwipc_encoder_params(False, 1, \n 1.0, octree_bits, jpeg_quality, 16, srctile, voxelsize)\n encoder = self.encoder_group.addencoder(params=encparams)\n self.encoders.append(encoder)\n if hasattr(self.sink, 'add_streamDesc'):\n if not 'normal' in self.tiledescriptions[tile]:\n print(\n f'encoder: warning: tile {tile} description has no normal vector: {self.tiledescriptions[tile]}'\n )\n normal = self.tiledescriptions[tile].get('normal',\n dict(x=0, y=0, z=0))\n streamNum = self.sink.add_streamDesc(tile, normal[\n 'x'], normal['y'], normal['z'])\n if self.verbose:\n print(\n f'encoder: streamNum={streamNum}, tile={tile}, srctile={srctile}, normal={normal}, octree_bits={octree_bits}, jpeg_quality={jpeg_quality}'\n )\n else:\n streamNum = 0\n assert streamNum == len(self.encoders) - 1\n <function token>\n\n def print1stat(self, name, values, isInt=False):\n count = len(values)\n if count == 0:\n print('encoder: {}: count=0'.format(name))\n return\n minValue = min(values)\n maxValue = max(values)\n avgValue = sum(values) / count\n if isInt:\n fmtstring = (\n 'encoder: {}: count={}, average={:.3f}, 
min={:d}, max={:d}')\n else:\n fmtstring = (\n 'encoder: {}: count={}, average={:.3f}, min={:.3f}, max={:.3f}'\n )\n print(fmtstring.format(name, count, avgValue, minValue, maxValue))\n\n\n<function token>\n",
"<import token>\n\n\nclass _Sink_Encoder(threading.Thread, cwipc_sink_abstract):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n sink: cwipc_rawsink_abstract\n input_queue: queue.Queue[cwipc.cwipc_wrapper]\n pointcounts: List[int]\n tiledescriptions: List[cwipc.cwipc_tileinfo_pythonic]\n encoder_group: Optional[cwipc.codec.cwipc_encodergroup_wrapper]\n encoders: List[cwipc.codec.cwipc_encoder_wrapper]\n times_encode: List[float]\n <function token>\n <function token>\n\n def start(self) ->None:\n self._init_encoders()\n threading.Thread.start(self)\n self.sink.start()\n self.started = True\n\n def stop(self) ->None:\n if self.verbose:\n print(f'encoder: stopping thread')\n self.stopped = True\n self.sink.stop()\n if self.started:\n self.join()\n <function token>\n <function token>\n\n def run(self):\n assert self.encoder_group\n if self.verbose:\n print(f'encoder: thread started')\n try:\n while (not self.stopped and self.producer and self.producer.\n is_alive()):\n pc = self.input_queue.get()\n if not pc:\n print(f'encoder: get() returned None')\n continue\n self.pointcounts.append(pc.count())\n t1 = time.time()\n self.encoder_group.feed(pc)\n packets: List[bytearray] = []\n for i in range(len(self.encoders)):\n got_data = self.encoders[i].available(True)\n assert got_data\n cpc = self.encoders[i].get_bytes()\n packets.append(cpc)\n t2 = time.time()\n if len(packets) == 1:\n self.sink.feed(packets[0])\n else:\n for i in range(len(packets)):\n self.sink.feed(packets[i], stream_index=i)\n pc.free()\n self.times_encode.append(t2 - t1)\n finally:\n self.stopped = True\n if self.verbose:\n print(f'encoder: thread stopping')\n <function token>\n\n def _init_encoders(self):\n if not self.octree_bits:\n self.octree_bits = 9\n if type(self.octree_bits) != type([]):\n self.octree_bits = [self.octree_bits]\n if not self.jpeg_quality:\n self.jpeg_quality = 85\n if type(self.jpeg_quality) != type([]):\n self.jpeg_quality = [self.jpeg_quality]\n voxelsize = 0\n if self.verbose:\n print(\n f'encoder: creating {len(self.tiledescriptions) * len(self.octree_bits) * len(self.jpeg_quality)} encoders/streams'\n )\n self.encoder_group = cwipc.codec.cwipc_new_encodergroup()\n for tile in range(len(self.tiledescriptions)):\n for octree_bits in self.octree_bits:\n for jpeg_quality in self.jpeg_quality:\n srctile = self.tiledescriptions[tile].get('ncamera', tile)\n encparams = cwipc.codec.cwipc_encoder_params(False, 1, \n 1.0, octree_bits, jpeg_quality, 16, srctile, voxelsize)\n encoder = self.encoder_group.addencoder(params=encparams)\n self.encoders.append(encoder)\n if hasattr(self.sink, 'add_streamDesc'):\n if not 'normal' in self.tiledescriptions[tile]:\n print(\n f'encoder: warning: tile {tile} description has no normal vector: {self.tiledescriptions[tile]}'\n )\n normal = self.tiledescriptions[tile].get('normal',\n dict(x=0, y=0, z=0))\n streamNum = self.sink.add_streamDesc(tile, normal[\n 'x'], normal['y'], normal['z'])\n if self.verbose:\n print(\n f'encoder: streamNum={streamNum}, tile={tile}, srctile={srctile}, normal={normal}, octree_bits={octree_bits}, jpeg_quality={jpeg_quality}'\n )\n else:\n streamNum = 0\n assert streamNum == len(self.encoders) - 1\n <function token>\n <function token>\n\n\n<function token>\n",
"<import token>\n\n\nclass _Sink_Encoder(threading.Thread, cwipc_sink_abstract):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n sink: cwipc_rawsink_abstract\n input_queue: queue.Queue[cwipc.cwipc_wrapper]\n pointcounts: List[int]\n tiledescriptions: List[cwipc.cwipc_tileinfo_pythonic]\n encoder_group: Optional[cwipc.codec.cwipc_encodergroup_wrapper]\n encoders: List[cwipc.codec.cwipc_encoder_wrapper]\n times_encode: List[float]\n <function token>\n <function token>\n\n def start(self) ->None:\n self._init_encoders()\n threading.Thread.start(self)\n self.sink.start()\n self.started = True\n\n def stop(self) ->None:\n if self.verbose:\n print(f'encoder: stopping thread')\n self.stopped = True\n self.sink.stop()\n if self.started:\n self.join()\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _init_encoders(self):\n if not self.octree_bits:\n self.octree_bits = 9\n if type(self.octree_bits) != type([]):\n self.octree_bits = [self.octree_bits]\n if not self.jpeg_quality:\n self.jpeg_quality = 85\n if type(self.jpeg_quality) != type([]):\n self.jpeg_quality = [self.jpeg_quality]\n voxelsize = 0\n if self.verbose:\n print(\n f'encoder: creating {len(self.tiledescriptions) * len(self.octree_bits) * len(self.jpeg_quality)} encoders/streams'\n )\n self.encoder_group = cwipc.codec.cwipc_new_encodergroup()\n for tile in range(len(self.tiledescriptions)):\n for octree_bits in self.octree_bits:\n for jpeg_quality in self.jpeg_quality:\n srctile = self.tiledescriptions[tile].get('ncamera', tile)\n encparams = cwipc.codec.cwipc_encoder_params(False, 1, \n 1.0, octree_bits, jpeg_quality, 16, srctile, voxelsize)\n encoder = self.encoder_group.addencoder(params=encparams)\n self.encoders.append(encoder)\n if hasattr(self.sink, 'add_streamDesc'):\n if not 'normal' in self.tiledescriptions[tile]:\n print(\n f'encoder: warning: tile {tile} description has no normal vector: {self.tiledescriptions[tile]}'\n )\n normal = self.tiledescriptions[tile].get('normal',\n dict(x=0, y=0, z=0))\n streamNum = self.sink.add_streamDesc(tile, normal[\n 'x'], normal['y'], normal['z'])\n if self.verbose:\n print(\n f'encoder: streamNum={streamNum}, tile={tile}, srctile={srctile}, normal={normal}, octree_bits={octree_bits}, jpeg_quality={jpeg_quality}'\n )\n else:\n streamNum = 0\n assert streamNum == len(self.encoders) - 1\n <function token>\n <function token>\n\n\n<function token>\n",
"<import token>\n\n\nclass _Sink_Encoder(threading.Thread, cwipc_sink_abstract):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n sink: cwipc_rawsink_abstract\n input_queue: queue.Queue[cwipc.cwipc_wrapper]\n pointcounts: List[int]\n tiledescriptions: List[cwipc.cwipc_tileinfo_pythonic]\n encoder_group: Optional[cwipc.codec.cwipc_encodergroup_wrapper]\n encoders: List[cwipc.codec.cwipc_encoder_wrapper]\n times_encode: List[float]\n <function token>\n <function token>\n\n def start(self) ->None:\n self._init_encoders()\n threading.Thread.start(self)\n self.sink.start()\n self.started = True\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _init_encoders(self):\n if not self.octree_bits:\n self.octree_bits = 9\n if type(self.octree_bits) != type([]):\n self.octree_bits = [self.octree_bits]\n if not self.jpeg_quality:\n self.jpeg_quality = 85\n if type(self.jpeg_quality) != type([]):\n self.jpeg_quality = [self.jpeg_quality]\n voxelsize = 0\n if self.verbose:\n print(\n f'encoder: creating {len(self.tiledescriptions) * len(self.octree_bits) * len(self.jpeg_quality)} encoders/streams'\n )\n self.encoder_group = cwipc.codec.cwipc_new_encodergroup()\n for tile in range(len(self.tiledescriptions)):\n for octree_bits in self.octree_bits:\n for jpeg_quality in self.jpeg_quality:\n srctile = self.tiledescriptions[tile].get('ncamera', tile)\n encparams = cwipc.codec.cwipc_encoder_params(False, 1, \n 1.0, octree_bits, jpeg_quality, 16, srctile, voxelsize)\n encoder = self.encoder_group.addencoder(params=encparams)\n self.encoders.append(encoder)\n if hasattr(self.sink, 'add_streamDesc'):\n if not 'normal' in self.tiledescriptions[tile]:\n print(\n f'encoder: warning: tile {tile} description has no normal vector: {self.tiledescriptions[tile]}'\n )\n normal = self.tiledescriptions[tile].get('normal',\n dict(x=0, y=0, z=0))\n streamNum = self.sink.add_streamDesc(tile, normal[\n 'x'], normal['y'], normal['z'])\n if self.verbose:\n print(\n f'encoder: streamNum={streamNum}, tile={tile}, srctile={srctile}, normal={normal}, octree_bits={octree_bits}, jpeg_quality={jpeg_quality}'\n )\n else:\n streamNum = 0\n assert streamNum == len(self.encoders) - 1\n <function token>\n <function token>\n\n\n<function token>\n",
"<import token>\n\n\nclass _Sink_Encoder(threading.Thread, cwipc_sink_abstract):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n sink: cwipc_rawsink_abstract\n input_queue: queue.Queue[cwipc.cwipc_wrapper]\n pointcounts: List[int]\n tiledescriptions: List[cwipc.cwipc_tileinfo_pythonic]\n encoder_group: Optional[cwipc.codec.cwipc_encodergroup_wrapper]\n encoders: List[cwipc.codec.cwipc_encoder_wrapper]\n times_encode: List[float]\n <function token>\n <function token>\n\n def start(self) ->None:\n self._init_encoders()\n threading.Thread.start(self)\n self.sink.start()\n self.started = True\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n",
"<import token>\n\n\nclass _Sink_Encoder(threading.Thread, cwipc_sink_abstract):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n sink: cwipc_rawsink_abstract\n input_queue: queue.Queue[cwipc.cwipc_wrapper]\n pointcounts: List[int]\n tiledescriptions: List[cwipc.cwipc_tileinfo_pythonic]\n encoder_group: Optional[cwipc.codec.cwipc_encodergroup_wrapper]\n encoders: List[cwipc.codec.cwipc_encoder_wrapper]\n times_encode: List[float]\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n",
"<import token>\n<class token>\n<function token>\n"
] | false |
98,616 |
626926d2a179fbcea2d73d9ba89d720c83708d03
|
"""Modèle de case management adapté au projet Labster."""
from __future__ import annotations
from collections.abc import Container
from datetime import date, datetime
from typing import Dict
import structlog
from labster.domain.services.notifications import send_email
from labster.forms.workflow import ConfirmerFinalisationForm, \
ConfirmerRecevabiliteForm, WorkflowForm
from labster.lib.workflow import State, Transition, Workflow
logger = structlog.get_logger()
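#
# Workflow states
#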
class EnEdition(State):
label = "En édition"
next_action = "Edition à finaliser et à soumettre"
def on_enter(self, workflow):
case = workflow.case
case.active = True
case.editable = True
def task_owners(self, workflow):
case = workflow.case
return {u for u in [case.gestionnaire, case.porteur] if u}
class EnValidation(State):
label = "En cours de validation hiérarchique"
label_short = "En validation"
next_action = "Demande à considérer pour validation"
def task_owners(self, workflow):
demande = workflow.case
# assert validation_stage
if not demande.wf_stage:
logger.warning(
f"Warning: la demande {demande.id} n'a pas de validation_stage"
)
demande.wf_stage = next_validation_stage(demande)
return demande.wf_stage.direction
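# Helper: walk up the structure hierarchy (equipe -> departement -> laboratoire),
# skipping levels that the request's structure does not require to validate, and
# return the next unit that must validate, or None when the chain is exhausted.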
def next_validation_stage(demande):
from labster.domain.models.unites import DEPARTEMENT, EQUIPE, LABORATOIRE
structure = demande.structure
stage = demande.wf_stage
if not stage:
stage = structure
if stage.type == EQUIPE and not structure.wf_must_validate(EQUIPE):
stage = stage.parent
if stage.type == DEPARTEMENT and not structure.wf_must_validate(DEPARTEMENT):
stage = stage.parent
if stage.type == LABORATOIRE and not structure.wf_must_validate(LABORATOIRE):
stage = None
assert stage is None or stage.type in [EQUIPE, DEPARTEMENT, LABORATOIRE]
return stage
stage = stage.parent
if not stage:
return None
if stage.type == DEPARTEMENT and not structure.wf_must_validate(DEPARTEMENT):
stage = stage.parent
if not stage:
return None
if stage.type == LABORATOIRE and not structure.wf_must_validate(LABORATOIRE):
stage = None
if not stage:
return None
if stage is None or stage.type in [DEPARTEMENT, LABORATOIRE]:
return stage
return None
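# DR&I-side states: the request is first checked for admissibility, then
# instructed by the assigned DR&I contact (contact_dgrtt).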
class EnVerification(State):
label = "Recevabilité en cours de vérification"
label_short = "En vérification"
next_action = "Recevabilité à confirmer"
def on_enter(self, workflow):
workflow.set_value("ar_envoye", False)
def task_owners(self, workflow):
case = workflow.case
if case.contact_dgrtt:
return [case.contact_dgrtt]
else:
return []
class EnInstruction(State):
label = "En cours d'instruction par la DR&I"
label_short = "En instruction"
next_action = "Instruction à mener et finaliser"
def task_owners(self, workflow):
case = workflow.case
if case.contact_dgrtt:
return [case.contact_dgrtt]
else:
return []
# Final states
class Traitee(State):
label = "Traitée par la DR&I"
label_short = "Traitée"
is_final = True
class Rejetee(State):
label = "Rejetée par la DR&I"
label_short = "Rejetée"
is_final = True
class Abandonnee(State):
label = "Abandonnée par le porteur"
label_short = "Abandonnée"
is_final = True
EN_EDITION = EnEdition()
EN_VALIDATION = EnValidation()
EN_VERIFICATION = EnVerification()
EN_INSTRUCTION = EnInstruction()
TRAITEE = Traitee()
REJETEE = Rejetee()
ABANDONNEE = Abandonnee()
ACTIVE_STATES: list[State] = [
EN_EDITION,
EN_VALIDATION,
EN_VERIFICATION,
EN_INSTRUCTION,
]
INACTIVE_STATES: list[State] = [TRAITEE, REJETEE, ABANDONNEE]
ALL_STATES: list[State] = ACTIVE_STATES + INACTIVE_STATES
#
# Transitions
#
class Abandonner(Transition):
label = "Abandonner la demande"
category = "danger"
from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]
to_state = ABANDONNEE
message = "{actor} a abandonné la demande."
def precondition(self, workflow):
return workflow.actor_is_porteur_or_gdl()
def get_users_to_notify(self, workflow, old_state):
case = workflow.case
if old_state == EN_EDITION:
return []
if old_state == EN_VALIDATION:
return case.owners + case.structure.direction
if old_state in (EN_VERIFICATION, EN_INSTRUCTION):
return case.owners + [case.contact_dgrtt]
raise RuntimeError(f"Unknown state: {old_state}")
class Desarchiver(Transition):
label = "Désarchiver la demande"
category = "danger"
from_states = [ABANDONNEE, REJETEE, TRAITEE]
message = "{actor} a désarchivé la demande."
def apply(self, workflow, data):
from labster.domain.models.demandes import Demande
        demande: Demande = workflow.case
old_state_id = demande.wf_history[-1].get("old_state", "EN_EDITION")
old_state = workflow.get_state_by_id(old_state_id)
old_state.enter(workflow)
demande.active = True
demande.editable = True
def get_form(self, workflow, **kw):
return WorkflowForm(require_note=True)
def get_users_to_notify(self, workflow, old_state):
case = workflow.case
return case.owners
class Soumettre(Transition):
label = "Soumettre la demande"
from_states = [EN_EDITION]
def precondition(self, workflow):
demande = workflow.case
structure = demande.structure
if not demande.is_valid():
return False
if workflow.actor == demande.porteur:
return True
        # Case to handle: the request comes from a sub-structure that falls
        # within the gestionnaire's scope.
        #
        # demande.structure and workflow.actor.structure may differ.
        #
        # For example:
        # demande.structure = equipe Developpement des circuits neuronaux
        # workflow.actor.structure = Institut de la vision
        # In that case
        # demande.structure.get_gestionnaires() = []
        #
        # On the request form, the user sees "Institut de la vision", so they
        # may switch that structure's "permettre_soummission_directe" setting to True.
        # We therefore end up with
        # demande.structure.permettre_soummission_directe unchanged, potentially False,
        # and workflow.actor.structure.permettre_soummission_directe set to True.
        #
        # Hence both of the following "if" checks are needed.
if (
workflow.actor.structure
and workflow.actor.structure.permettre_soummission_directe
and workflow.actor in workflow.actor.structure.get_gestionnaires()
):
return True
if (
structure.permettre_soummission_directe
and workflow.actor in structure.get_gestionnaires()
):
return True
if (
workflow.get_value("validee_hierarchie")
and structure.permettre_reponse_directe
and workflow.actor == demande.gestionnaire
):
return True
return False
def apply(self, workflow, data):
demande = workflow.case
demande.editable = False
if data.get("resoumission"):
workflow.set_value("validee_hierarchie", False)
if workflow.get_value("validee_hierarchie"):
if workflow.get_value("recevable"):
EN_INSTRUCTION.enter(workflow)
else:
EN_VERIFICATION.enter(workflow)
else:
demande.wf_stage = next_validation_stage(demande)
EN_VALIDATION.enter(workflow)
def message(self, workflow):
if workflow.get_value("validee_hierarchie"):
return "{actor} a resoumis sa demande sans revalidation hiérarchique"
else:
return "{actor} a soumis sa demande pour validation hiérarchique."
def get_users_to_notify(self, workflow, old_state):
case = workflow.case
if workflow.get_value("validee_hierarchie"):
return [case.contact_dgrtt]
else:
return case.wf_stage.direction
def get_form(self, workflow, **kw):
if workflow.get_value("validee_hierarchie"):
return WorkflowForm(ask_for_revalidation=True)
return WorkflowForm()
#
# Take over the request
#
class AbstractPrendreLaMain(Transition):
label = "Prendre la main sur la demande"
category = "danger"
message = "{actor} a pris la main sur la demande."
def get_users_to_notify(self, workflow, old_state):
from labster.domain.models.profiles import Profile
case = workflow.case
actor = workflow.actor
user_ids = {entry["actor_id"] for entry in case.wf_history}
users = []
        for user_id in user_ids:
            try:
                user = Profile.query.get(user_id)
if user != actor:
users.append(user)
except Exception:
pass
return users
class PrendreLaMainGestionnaire(AbstractPrendreLaMain):
from_states = ACTIVE_STATES
def precondition(self, workflow):
case = workflow.case
actor = workflow.actor
return actor.has_role("gestionnaire", case) and actor not in (
case.gestionnaire,
case.porteur,
)
def apply(self, workflow, data):
case = workflow.case
actor = workflow.actor
case.gestionnaire = actor
class PrendreLaMainDgrtt(AbstractPrendreLaMain):
from_states = ACTIVE_STATES
def precondition(self, workflow):
case = workflow.case
actor = workflow.actor
        return actor.has_role("dgrtt") and actor != case.contact_dgrtt
def apply(self, workflow, data):
case = workflow.case
actor = workflow.actor
case.contact_dgrtt = actor
#
# "Valider demande (hiérarchie)
#
class ValiderDir(Transition):
label = "Valider la demande"
from_states = [EN_VALIDATION]
message = "Demande validée par la hiérarchie ({actor})."
def precondition(self, workflow):
return workflow.actor in workflow.state.task_owners(workflow)
def apply(self, workflow, data):
demande = workflow.case
demande.assigne_contact_dgrtt()
workflow.set_value("validee_hierarchie", True)
demande.date_effective = date.today()
next_stage = next_validation_stage(demande)
demande.wf_stage = next_stage
if not next_stage:
EN_VERIFICATION.enter(workflow)
def get_users_to_notify(self, workflow, old_state):
case = workflow.case
if case.contact_dgrtt:
return case.owners + [case.contact_dgrtt]
else:
return case.owners
class RequerirModificationDir(Transition):
label = "Requérir modification / complément"
from_states = [EN_VALIDATION]
to_state = EN_EDITION
message = (
"Demande de compléments / modifications par {actor} "
"(direction labo/département/équipe) pour vérification de recevabilité."
)
def precondition(self, workflow):
return workflow.actor in workflow.state.task_owners(workflow)
def apply(self, workflow, data):
demande = workflow.case
demande.wf_stage = None
workflow.set_value("validee_hierarchie", False)
def get_users_to_notify(self, workflow, old_state):
case = workflow.case
return case.owners
def get_form(self, workflow, **kw):
return WorkflowForm(require_note=True)
#
# DGRTT (renamed DR&I)
#
class AccuserReception(Transition):
label = "Accuser réception en attendant vérification ultérieure"
from_states = [EN_VERIFICATION]
to_state = EN_VERIFICATION
message = "Accusé de réception envoyé par {actor} (contact)."
def precondition(self, workflow):
return workflow.actor_is_contact_dgrtt() and not workflow.get_value(
"ar_envoye", False
)
def apply(self, workflow, data):
workflow.set_value("ar_envoye", True)
def get_users_to_notify(self, workflow, old_state):
case = workflow.case
return case.owners
class RequerirModificationDgrtt(Transition):
label = "Requérir modification / complément"
from_states = [EN_VERIFICATION, EN_INSTRUCTION]
to_state = EN_EDITION
def precondition(self, workflow):
return workflow.actor_is_contact_dgrtt()
def apply(self, workflow, data):
if data.get("resoumission"):
workflow.set_value("validee_hierarchie", False)
def message(self, workflow):
if workflow.state == EN_VERIFICATION:
return (
"Demande de compléments / modifications par {actor} "
"(contact) pour vérification de recevabilité."
)
else:
return (
"Demande de compléments / modifications par {actor} "
"(contact) pour instruction."
)
def get_users_to_notify(self, workflow, old_state):
case = workflow.case
return case.owners
def get_form(self, workflow, **kw):
if workflow.get_value("validee_hierarchie"):
return WorkflowForm(ask_for_revalidation=True, require_note=True)
return WorkflowForm(require_note=True)
class ConfirmerRecevabiliteDgrtt(Transition):
label = "Confirmer recevabilité"
from_states = [EN_VERIFICATION]
to_state = EN_INSTRUCTION
def precondition(self, workflow):
return workflow.actor_is_contact_dgrtt()
def apply(self, workflow, data):
workflow.set_value("recevable", True)
self.send_notification(workflow)
def send_notification(self, workflow):
case = workflow.case
subject = "Recevabilité de votre demande par la DR&I"
ctx = {
"transition": self,
"demande": case,
"workflow": workflow,
"now": datetime.now(),
}
send_email(case.owners, subject, "notif-demande-recevable.html", ctx)
def get_users_to_notify(self, workflow, old_state):
case = workflow.case
return case.owners
def get_form(self, workflow, **kw):
return ConfirmerRecevabiliteForm()
def message(self, workflow):
demande = workflow.case
tpl = (
"Recevabilité confirmée par {actor} (contact). "
"No Infolab: %s." % demande.no_infolab
)
return tpl
class ConfirmerFinalisationDgrtt(Transition):
label = "Confirmer finalisation"
from_states = [EN_INSTRUCTION]
to_state = TRAITEE
def precondition(self, workflow):
return workflow.actor_is_contact_dgrtt()
def get_users_to_notify(self, workflow, old_state):
case = workflow.case
return case.owners
def get_form(self, workflow, **kw):
return ConfirmerFinalisationForm()
def apply(self, workflow, data):
self.send_notification(workflow)
def send_notification(self, workflow):
case = workflow.case
ctx = {
"transition": self,
"demande": case,
"workflow": workflow,
"now": datetime.now(),
}
subject = "Finalisation de votre demande par la DR&I"
du = case.laboratoire.direction
recipients = case.owners + du
send_email(recipients, subject, "notif-demande-finalisee.html", ctx)
def message(self, workflow):
demande = workflow.case
tpl = (
"Traitement finalisé par {actor} (contact)."
"No eOTP: %s." % demande.no_eotp
)
return tpl
class RejeterDgrtt(Transition):
label = "Rejeter / abandonner demande"
category = "danger"
from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]
to_state = REJETEE
message = "Demande rejetée / abandonnées par {actor} (contact)."
def precondition(self, workflow):
actor = workflow.actor
return actor.has_role("dgrtt")
def get_users_to_notify(self, workflow, old_state):
case = workflow.case
return case.owners
    def apply(self, workflow: Workflow, data: Dict) -> None:
        self.send_notification(workflow)
    def send_notification(self, workflow: Workflow) -> None:
        case = workflow.case
ctx = {
"transition": self,
"demande": case,
"workflow": workflow,
"now": datetime.now(),
}
subject = "Rejet de votre demande par la DR&I"
send_email(case.owners, subject, "notif-demande-rejetee.html", ctx)
def get_form(self, workflow, **kw):
return WorkflowForm(require_note=True)
class Commenter(Transition):
label = "Envoyer un message"
category = "success"
from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]
message = "{actor} a posté le commentaire ou la question suivante: "
    def precondition(self, workflow: Workflow) -> bool:
        actor = workflow.actor
demande = workflow.case
if workflow.state == EN_EDITION and not demande.contact_dgrtt:
return False
return actor in self._get_stakeholder(workflow)
def get_users_to_notify(self, workflow, old_state):
return self._get_stakeholder(workflow)
    def _get_stakeholder(self, workflow: Workflow) -> Container:
from .profiles import Profile
from .unites import OrgUnit
demande = workflow.case
        structure: OrgUnit = demande.structure
stakeholders = set(demande.owners)
if demande.contact_dgrtt:
stakeholders.add(demande.contact_dgrtt)
directeurs = structure.get_directeurs()
for directeur in directeurs:
stakeholders.add(directeur)
for history_item in demande.wf_history:
actor_id = history_item["actor_id"]
if actor_id:
actor = Profile.query.get(actor_id)
stakeholders.add(actor)
return stakeholders
def get_form(self, workflow, **kw):
return WorkflowForm(require_note=True)
    def apply(self, workflow: Workflow, data: Dict) -> None:
        self.send_notification(workflow)
    def send_notification(self, workflow: Workflow) -> None:
        case = workflow.case
ctx = {
"transition": self,
"demande": case,
"workflow": workflow,
"now": datetime.now(),
}
subject = "Un commentaire sur votre demande"
send_email(case.owners, subject, "notif-demande-comment.html", ctx)
#
ABANDONNER = Abandonner()
DESARCHIVER = Desarchiver()
SOUMETTRE = Soumettre()
PRENDRE_LA_MAIN_GESTIONNAIRE = PrendreLaMainGestionnaire()
PRENDRE_LA_MAIN_DGRTT = PrendreLaMainDgrtt()
#
VALIDER_DIR = ValiderDir()
REQUERIR_MODIFICATION_DIR = RequerirModificationDir()
#
ACCUSER_RECEPTION = AccuserReception()
REQUERIR_MODIFICATION_DGRTT = RequerirModificationDgrtt()
CONFIRMER_RECEVABILITE_DGRTT = ConfirmerRecevabiliteDgrtt()
CONFIRMER_FINALISATION_DGRTT = ConfirmerFinalisationDgrtt()
REJETER_DGRTT = RejeterDgrtt()
#
COMMENTER = Commenter()
#
# Workflow
#
class LabsterWorkflow(Workflow):
initial_state = EN_EDITION
states = ALL_STATES
# NB: order counts!
transitions = [
SOUMETTRE,
PRENDRE_LA_MAIN_GESTIONNAIRE,
VALIDER_DIR,
PRENDRE_LA_MAIN_DGRTT,
REQUERIR_MODIFICATION_DIR,
ACCUSER_RECEPTION,
CONFIRMER_RECEVABILITE_DGRTT,
CONFIRMER_FINALISATION_DGRTT,
REQUERIR_MODIFICATION_DGRTT,
REJETER_DGRTT,
ABANDONNER,
DESARCHIVER,
COMMENTER,
]
def actor_is_contact_dgrtt(self):
return self.actor == self.case.contact_dgrtt
def actor_is_porteur_or_gdl(self):
return self.actor in (self.case.porteur, self.case.gestionnaire)
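# Minimal usage sketch (assumptions, not shown in this module): the Workflow base
# class is presumed to be constructed from the case and the acting user, e.g.
#
#     workflow = LabsterWorkflow(case=demande, actor=current_user)
#     workflow.actor_is_contact_dgrtt()    # True when the actor is the DR&I contact
#     workflow.actor_is_porteur_or_gdl()   # True for the porteur or the gestionnaire
#
# demande, current_user and the constructor signature above are hypothetical.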
|
[
"\"\"\"Modèle de case management adapté au projet Labster.\"\"\"\nfrom __future__ import annotations\n\nfrom collections.abc import Container\nfrom datetime import date, datetime\nfrom typing import Dict\n\nimport structlog\n\nfrom labster.domain.services.notifications import send_email\nfrom labster.forms.workflow import ConfirmerFinalisationForm, \\\n ConfirmerRecevabiliteForm, WorkflowForm\nfrom labster.lib.workflow import State, Transition, Workflow\n\nlogger = structlog.get_logger()\n\n\nclass EnEdition(State):\n label = \"En édition\"\n next_action = \"Edition à finaliser et à soumettre\"\n\n def on_enter(self, workflow):\n case = workflow.case\n case.active = True\n case.editable = True\n\n def task_owners(self, workflow):\n case = workflow.case\n return {u for u in [case.gestionnaire, case.porteur] if u}\n\n\nclass EnValidation(State):\n label = \"En cours de validation hiérarchique\"\n label_short = \"En validation\"\n next_action = \"Demande à considérer pour validation\"\n\n def task_owners(self, workflow):\n demande = workflow.case\n # assert validation_stage\n if not demande.wf_stage:\n logger.warning(\n f\"Warning: la demande {demande.id} n'a pas de validation_stage\"\n )\n demande.wf_stage = next_validation_stage(demande)\n return demande.wf_stage.direction\n\n\ndef next_validation_stage(demande):\n from labster.domain.models.unites import DEPARTEMENT, EQUIPE, LABORATOIRE\n\n structure = demande.structure\n stage = demande.wf_stage\n\n if not stage:\n stage = structure\n if stage.type == EQUIPE and not structure.wf_must_validate(EQUIPE):\n stage = stage.parent\n if stage.type == DEPARTEMENT and not structure.wf_must_validate(DEPARTEMENT):\n stage = stage.parent\n if stage.type == LABORATOIRE and not structure.wf_must_validate(LABORATOIRE):\n stage = None\n assert stage is None or stage.type in [EQUIPE, DEPARTEMENT, LABORATOIRE]\n return stage\n\n stage = stage.parent\n if not stage:\n return None\n\n if stage.type == DEPARTEMENT and not structure.wf_must_validate(DEPARTEMENT):\n stage = stage.parent\n if not stage:\n return None\n\n if stage.type == LABORATOIRE and not structure.wf_must_validate(LABORATOIRE):\n stage = None\n if not stage:\n return None\n\n if stage == None or stage.type in [DEPARTEMENT, LABORATOIRE]:\n return stage\n\n return None\n\n\nclass EnVerification(State):\n label = \"Recevabilité en cours de vérification\"\n label_short = \"En vérification\"\n next_action = \"Recevabilité à confirmer\"\n\n def on_enter(self, workflow):\n workflow.set_value(\"ar_envoye\", False)\n\n def task_owners(self, workflow):\n case = workflow.case\n if case.contact_dgrtt:\n return [case.contact_dgrtt]\n else:\n return []\n\n\nclass EnInstruction(State):\n label = \"En cours d'instruction par la DR&I\"\n label_short = \"En instruction\"\n next_action = \"Instruction à mener et finaliser\"\n\n def task_owners(self, workflow):\n case = workflow.case\n if case.contact_dgrtt:\n return [case.contact_dgrtt]\n else:\n return []\n\n\n# Etats finaux\nclass Traitee(State):\n label = \"Traitée par la DR&I\"\n label_short = \"Traitée\"\n is_final = True\n\n\nclass Rejetee(State):\n label = \"Rejetée par la DR&I\"\n label_short = \"Rejetée\"\n is_final = True\n\n\nclass Abandonnee(State):\n label = \"Abandonnée par le porteur\"\n label_short = \"Abandonnée\"\n is_final = True\n\n\nEN_EDITION = EnEdition()\nEN_VALIDATION = EnValidation()\nEN_VERIFICATION = EnVerification()\nEN_INSTRUCTION = EnInstruction()\nTRAITEE = Traitee()\nREJETEE = Rejetee()\nABANDONNEE = 
Abandonnee()\n\nACTIVE_STATES: list[State] = [\n EN_EDITION,\n EN_VALIDATION,\n EN_VERIFICATION,\n EN_INSTRUCTION,\n]\nINACTIVE_STATES: list[State] = [TRAITEE, REJETEE, ABANDONNEE]\nALL_STATES: list[State] = ACTIVE_STATES + INACTIVE_STATES\n\n\n#\n# Transitions\n#\nclass Abandonner(Transition):\n label = \"Abandonner la demande\"\n category = \"danger\"\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = ABANDONNEE\n message = \"{actor} a abandonné la demande.\"\n\n def precondition(self, workflow):\n return workflow.actor_is_porteur_or_gdl()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if old_state == EN_EDITION:\n return []\n if old_state == EN_VALIDATION:\n return case.owners + case.structure.direction\n if old_state in (EN_VERIFICATION, EN_INSTRUCTION):\n return case.owners + [case.contact_dgrtt]\n raise RuntimeError(f\"Unknown state: {old_state}\")\n\n\nclass Desarchiver(Transition):\n label = \"Désarchiver la demande\"\n category = \"danger\"\n from_states = [ABANDONNEE, REJETEE, TRAITEE]\n message = \"{actor} a désarchivé la demande.\"\n\n def apply(self, workflow, data):\n from labster.domain.models.demandes import Demande\n\n demande = workflow.case # type: Demande\n old_state_id = demande.wf_history[-1].get(\"old_state\", \"EN_EDITION\")\n old_state = workflow.get_state_by_id(old_state_id)\n old_state.enter(workflow)\n demande.active = True\n demande.editable = True\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass Soumettre(Transition):\n label = \"Soumettre la demande\"\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n\n if not demande.is_valid():\n return False\n\n if workflow.actor == demande.porteur:\n return True\n\n # Cas à gérer: la demande est issue d'une sous-structure dans\n # le périmètre du gestionnaire.\n #\n # demande.structure et workflow.actor.structure peuvent être différentes.\n #\n # Par exemple:\n # demande.structure = equipe Developpement des circuits neuronaux\n # workflow.actor.structure = Institut de la vision\n # Et dans ce cas\n # demande.structure.get_gestionnaires() = []\n #\n # Sur le formulaire de la demande, l'utilisateur voit\n # \"Institut de la vision\", donc peut aller changer son paramètre \"permettre_soummission_directe\" à True\n # On a donc\n # demande.structure.permettre_soummission_directe inchangé, potentiellement à False,\n # et workflow.actor.structure.permettre_soummission_directe à True.\n #\n # On a donc besoin des deux \"if\" suivants.\n if (\n workflow.actor.structure\n and workflow.actor.structure.permettre_soummission_directe\n and workflow.actor in workflow.actor.structure.get_gestionnaires()\n ):\n return True\n\n if (\n structure.permettre_soummission_directe\n and workflow.actor in structure.get_gestionnaires()\n ):\n return True\n\n if (\n workflow.get_value(\"validee_hierarchie\")\n and structure.permettre_reponse_directe\n and workflow.actor == demande.gestionnaire\n ):\n return True\n\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n\n if data.get(\"resoumission\"):\n workflow.set_value(\"validee_hierarchie\", False)\n\n if workflow.get_value(\"validee_hierarchie\"):\n if workflow.get_value(\"recevable\"):\n EN_INSTRUCTION.enter(workflow)\n else:\n 
EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value(\"validee_hierarchie\"):\n return \"{actor} a resoumis sa demande sans revalidation hiérarchique\"\n else:\n return \"{actor} a soumis sa demande pour validation hiérarchique.\"\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value(\"validee_hierarchie\"):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value(\"validee_hierarchie\"):\n return WorkflowForm(ask_for_revalidation=True)\n\n return WorkflowForm()\n\n\n#\n# Prendre la main\n#\nclass AbstractPrendreLaMain(Transition):\n label = \"Prendre la main sur la demande\"\n category = \"danger\"\n message = \"{actor} a pris la main sur la demande.\"\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry[\"actor_id\"] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role(\"gestionnaire\", case) and actor not in (\n case.gestionnaire,\n case.porteur,\n )\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role(\"dgrtt\") and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\n#\n# \"Valider demande (hiérarchie)\n#\nclass ValiderDir(Transition):\n label = \"Valider la demande\"\n from_states = [EN_VALIDATION]\n message = \"Demande validée par la hiérarchie ({actor}).\"\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value(\"validee_hierarchie\", True)\n demande.date_effective = date.today()\n\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = \"Requérir modification / complément\"\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n \"Demande de compléments / modifications par {actor} \"\n \"(direction labo/département/équipe) pour vérification de recevabilité.\"\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value(\"validee_hierarchie\", False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return 
WorkflowForm(require_note=True)\n\n\n#\n# DGRTT (renommé DR&I)\n#\nclass AccuserReception(Transition):\n label = \"Accuser réception en attendant vérification ultérieure\"\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = \"Accusé de réception envoyé par {actor} (contact).\"\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n \"ar_envoye\", False\n )\n\n def apply(self, workflow, data):\n workflow.set_value(\"ar_envoye\", True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = \"Requérir modification / complément\"\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get(\"resoumission\"):\n workflow.set_value(\"validee_hierarchie\", False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n \"Demande de compléments / modifications par {actor} \"\n \"(contact) pour vérification de recevabilité.\"\n )\n else:\n return (\n \"Demande de compléments / modifications par {actor} \"\n \"(contact) pour instruction.\"\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value(\"validee_hierarchie\"):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = \"Confirmer recevabilité\"\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value(\"recevable\", True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = \"Recevabilité de votre demande par la DR&I\"\n ctx = {\n \"transition\": self,\n \"demande\": case,\n \"workflow\": workflow,\n \"now\": datetime.now(),\n }\n send_email(case.owners, subject, \"notif-demande-recevable.html\", ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n \"Recevabilité confirmée par {actor} (contact). 
\"\n \"No Infolab: %s.\" % demande.no_infolab\n )\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = \"Confirmer finalisation\"\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {\n \"transition\": self,\n \"demande\": case,\n \"workflow\": workflow,\n \"now\": datetime.now(),\n }\n subject = \"Finalisation de votre demande par la DR&I\"\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, \"notif-demande-finalisee.html\", ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n \"Traitement finalisé par {actor} (contact).\"\n \"No eOTP: %s.\" % demande.no_eotp\n )\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = \"Rejeter / abandonner demande\"\n category = \"danger\"\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = \"Demande rejetée / abandonnées par {actor} (contact).\"\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role(\"dgrtt\")\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n # type: (Workflow, Dict) -> None\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n # type: (Workflow) -> None\n case = workflow.case\n ctx = {\n \"transition\": self,\n \"demande\": case,\n \"workflow\": workflow,\n \"now\": datetime.now(),\n }\n subject = \"Rejet de votre demande par la DR&I\"\n send_email(case.owners, subject, \"notif-demande-rejetee.html\", ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = \"Envoyer un message\"\n category = \"success\"\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = \"{actor} a posté le commentaire ou la question suivante: \"\n\n def precondition(self, workflow):\n # type: (Workflow) -> bool\n\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n # type: (Workflow) -> Container\n from .profiles import Profile\n from .unites import OrgUnit\n\n demande = workflow.case\n structure = demande.structure # type: OrgUnit\n\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n\n for history_item in demande.wf_history:\n actor_id = history_item[\"actor_id\"]\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n # type: (Workflow, Dict) -> None\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n # type: (Workflow) -> None\n case = workflow.case\n ctx = {\n 
\"transition\": self,\n \"demande\": case,\n \"workflow\": workflow,\n \"now\": datetime.now(),\n }\n subject = \"Un commentaire sur votre demande\"\n send_email(case.owners, subject, \"notif-demande-comment.html\", ctx)\n\n\n#\nABANDONNER = Abandonner()\nDESARCHIVER = Desarchiver()\nSOUMETTRE = Soumettre()\nPRENDRE_LA_MAIN_GESTIONNAIRE = PrendreLaMainGestionnaire()\nPRENDRE_LA_MAIN_DGRTT = PrendreLaMainDgrtt()\n#\nVALIDER_DIR = ValiderDir()\nREQUERIR_MODIFICATION_DIR = RequerirModificationDir()\n#\nACCUSER_RECEPTION = AccuserReception()\nREQUERIR_MODIFICATION_DGRTT = RequerirModificationDgrtt()\nCONFIRMER_RECEVABILITE_DGRTT = ConfirmerRecevabiliteDgrtt()\nCONFIRMER_FINALISATION_DGRTT = ConfirmerFinalisationDgrtt()\nREJETER_DGRTT = RejeterDgrtt()\n#\nCOMMENTER = Commenter()\n\n\n#\n# Workflow\n#\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n\n states = ALL_STATES\n\n # NB: order counts!\n transitions = [\n SOUMETTRE,\n PRENDRE_LA_MAIN_GESTIONNAIRE,\n VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT,\n REQUERIR_MODIFICATION_DIR,\n ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT,\n CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT,\n REJETER_DGRTT,\n ABANDONNER,\n DESARCHIVER,\n COMMENTER,\n ]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\nfrom __future__ import annotations\nfrom collections.abc import Container\nfrom datetime import date, datetime\nfrom typing import Dict\nimport structlog\nfrom labster.domain.services.notifications import send_email\nfrom labster.forms.workflow import ConfirmerFinalisationForm, ConfirmerRecevabiliteForm, WorkflowForm\nfrom labster.lib.workflow import State, Transition, Workflow\nlogger = structlog.get_logger()\n\n\nclass EnEdition(State):\n label = 'En édition'\n next_action = 'Edition à finaliser et à soumettre'\n\n def on_enter(self, workflow):\n case = workflow.case\n case.active = True\n case.editable = True\n\n def task_owners(self, workflow):\n case = workflow.case\n return {u for u in [case.gestionnaire, case.porteur] if u}\n\n\nclass EnValidation(State):\n label = 'En cours de validation hiérarchique'\n label_short = 'En validation'\n next_action = 'Demande à considérer pour validation'\n\n def task_owners(self, workflow):\n demande = workflow.case\n if not demande.wf_stage:\n logger.warning(\n f\"Warning: la demande {demande.id} n'a pas de validation_stage\"\n )\n demande.wf_stage = next_validation_stage(demande)\n return demande.wf_stage.direction\n\n\ndef next_validation_stage(demande):\n from labster.domain.models.unites import DEPARTEMENT, EQUIPE, LABORATOIRE\n structure = demande.structure\n stage = demande.wf_stage\n if not stage:\n stage = structure\n if stage.type == EQUIPE and not structure.wf_must_validate(EQUIPE):\n stage = stage.parent\n if stage.type == DEPARTEMENT and not structure.wf_must_validate(\n DEPARTEMENT):\n stage = stage.parent\n if stage.type == LABORATOIRE and not structure.wf_must_validate(\n LABORATOIRE):\n stage = None\n assert stage is None or stage.type in [EQUIPE, DEPARTEMENT, LABORATOIRE\n ]\n return stage\n stage = stage.parent\n if not stage:\n return None\n if stage.type == DEPARTEMENT and not structure.wf_must_validate(DEPARTEMENT\n ):\n stage = stage.parent\n if not stage:\n return None\n if stage.type == LABORATOIRE and not structure.wf_must_validate(LABORATOIRE\n ):\n stage = None\n if not stage:\n return None\n if stage == None or stage.type in [DEPARTEMENT, LABORATOIRE]:\n return stage\n return None\n\n\nclass EnVerification(State):\n label = 'Recevabilité en cours de vérification'\n label_short = 'En vérification'\n next_action = 'Recevabilité à confirmer'\n\n def on_enter(self, workflow):\n workflow.set_value('ar_envoye', False)\n\n def task_owners(self, workflow):\n case = workflow.case\n if case.contact_dgrtt:\n return [case.contact_dgrtt]\n else:\n return []\n\n\nclass EnInstruction(State):\n label = \"En cours d'instruction par la DR&I\"\n label_short = 'En instruction'\n next_action = 'Instruction à mener et finaliser'\n\n def task_owners(self, workflow):\n case = workflow.case\n if case.contact_dgrtt:\n return [case.contact_dgrtt]\n else:\n return []\n\n\nclass Traitee(State):\n label = 'Traitée par la DR&I'\n label_short = 'Traitée'\n is_final = True\n\n\nclass Rejetee(State):\n label = 'Rejetée par la DR&I'\n label_short = 'Rejetée'\n is_final = True\n\n\nclass Abandonnee(State):\n label = 'Abandonnée par le porteur'\n label_short = 'Abandonnée'\n is_final = True\n\n\nEN_EDITION = EnEdition()\nEN_VALIDATION = EnValidation()\nEN_VERIFICATION = EnVerification()\nEN_INSTRUCTION = EnInstruction()\nTRAITEE = Traitee()\nREJETEE = Rejetee()\nABANDONNEE = Abandonnee()\nACTIVE_STATES: list[State] = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION,\n EN_INSTRUCTION]\nINACTIVE_STATES: list[State] = [TRAITEE, REJETEE, 
ABANDONNEE]\nALL_STATES: list[State] = ACTIVE_STATES + INACTIVE_STATES\n\n\nclass Abandonner(Transition):\n label = 'Abandonner la demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = ABANDONNEE\n message = '{actor} a abandonné la demande.'\n\n def precondition(self, workflow):\n return workflow.actor_is_porteur_or_gdl()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if old_state == EN_EDITION:\n return []\n if old_state == EN_VALIDATION:\n return case.owners + case.structure.direction\n if old_state in (EN_VERIFICATION, EN_INSTRUCTION):\n return case.owners + [case.contact_dgrtt]\n raise RuntimeError(f'Unknown state: {old_state}')\n\n\nclass Desarchiver(Transition):\n label = 'Désarchiver la demande'\n category = 'danger'\n from_states = [ABANDONNEE, REJETEE, TRAITEE]\n message = '{actor} a désarchivé la demande.'\n\n def apply(self, workflow, data):\n from labster.domain.models.demandes import Demande\n demande = workflow.case\n old_state_id = demande.wf_history[-1].get('old_state', 'EN_EDITION')\n old_state = workflow.get_state_by_id(old_state_id)\n old_state.enter(workflow)\n demande.active = True\n demande.editable = True\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for 
entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / 
modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\nABANDONNER = Abandonner()\nDESARCHIVER = Desarchiver()\nSOUMETTRE = Soumettre()\nPRENDRE_LA_MAIN_GESTIONNAIRE = PrendreLaMainGestionnaire()\nPRENDRE_LA_MAIN_DGRTT = PrendreLaMainDgrtt()\nVALIDER_DIR = ValiderDir()\nREQUERIR_MODIFICATION_DIR = RequerirModificationDir()\nACCUSER_RECEPTION = AccuserReception()\nREQUERIR_MODIFICATION_DGRTT = RequerirModificationDgrtt()\nCONFIRMER_RECEVABILITE_DGRTT = ConfirmerRecevabiliteDgrtt()\nCONFIRMER_FINALISATION_DGRTT = ConfirmerFinalisationDgrtt()\nREJETER_DGRTT = RejeterDgrtt()\nCOMMENTER = Commenter()\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, 
self.case.gestionnaire)\n",
"<docstring token>\n<import token>\nlogger = structlog.get_logger()\n\n\nclass EnEdition(State):\n label = 'En édition'\n next_action = 'Edition à finaliser et à soumettre'\n\n def on_enter(self, workflow):\n case = workflow.case\n case.active = True\n case.editable = True\n\n def task_owners(self, workflow):\n case = workflow.case\n return {u for u in [case.gestionnaire, case.porteur] if u}\n\n\nclass EnValidation(State):\n label = 'En cours de validation hiérarchique'\n label_short = 'En validation'\n next_action = 'Demande à considérer pour validation'\n\n def task_owners(self, workflow):\n demande = workflow.case\n if not demande.wf_stage:\n logger.warning(\n f\"Warning: la demande {demande.id} n'a pas de validation_stage\"\n )\n demande.wf_stage = next_validation_stage(demande)\n return demande.wf_stage.direction\n\n\ndef next_validation_stage(demande):\n from labster.domain.models.unites import DEPARTEMENT, EQUIPE, LABORATOIRE\n structure = demande.structure\n stage = demande.wf_stage\n if not stage:\n stage = structure\n if stage.type == EQUIPE and not structure.wf_must_validate(EQUIPE):\n stage = stage.parent\n if stage.type == DEPARTEMENT and not structure.wf_must_validate(\n DEPARTEMENT):\n stage = stage.parent\n if stage.type == LABORATOIRE and not structure.wf_must_validate(\n LABORATOIRE):\n stage = None\n assert stage is None or stage.type in [EQUIPE, DEPARTEMENT, LABORATOIRE\n ]\n return stage\n stage = stage.parent\n if not stage:\n return None\n if stage.type == DEPARTEMENT and not structure.wf_must_validate(DEPARTEMENT\n ):\n stage = stage.parent\n if not stage:\n return None\n if stage.type == LABORATOIRE and not structure.wf_must_validate(LABORATOIRE\n ):\n stage = None\n if not stage:\n return None\n if stage == None or stage.type in [DEPARTEMENT, LABORATOIRE]:\n return stage\n return None\n\n\nclass EnVerification(State):\n label = 'Recevabilité en cours de vérification'\n label_short = 'En vérification'\n next_action = 'Recevabilité à confirmer'\n\n def on_enter(self, workflow):\n workflow.set_value('ar_envoye', False)\n\n def task_owners(self, workflow):\n case = workflow.case\n if case.contact_dgrtt:\n return [case.contact_dgrtt]\n else:\n return []\n\n\nclass EnInstruction(State):\n label = \"En cours d'instruction par la DR&I\"\n label_short = 'En instruction'\n next_action = 'Instruction à mener et finaliser'\n\n def task_owners(self, workflow):\n case = workflow.case\n if case.contact_dgrtt:\n return [case.contact_dgrtt]\n else:\n return []\n\n\nclass Traitee(State):\n label = 'Traitée par la DR&I'\n label_short = 'Traitée'\n is_final = True\n\n\nclass Rejetee(State):\n label = 'Rejetée par la DR&I'\n label_short = 'Rejetée'\n is_final = True\n\n\nclass Abandonnee(State):\n label = 'Abandonnée par le porteur'\n label_short = 'Abandonnée'\n is_final = True\n\n\nEN_EDITION = EnEdition()\nEN_VALIDATION = EnValidation()\nEN_VERIFICATION = EnVerification()\nEN_INSTRUCTION = EnInstruction()\nTRAITEE = Traitee()\nREJETEE = Rejetee()\nABANDONNEE = Abandonnee()\nACTIVE_STATES: list[State] = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION,\n EN_INSTRUCTION]\nINACTIVE_STATES: list[State] = [TRAITEE, REJETEE, ABANDONNEE]\nALL_STATES: list[State] = ACTIVE_STATES + INACTIVE_STATES\n\n\nclass Abandonner(Transition):\n label = 'Abandonner la demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = ABANDONNEE\n message = '{actor} a abandonné la demande.'\n\n def precondition(self, workflow):\n return 
workflow.actor_is_porteur_or_gdl()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if old_state == EN_EDITION:\n return []\n if old_state == EN_VALIDATION:\n return case.owners + case.structure.direction\n if old_state in (EN_VERIFICATION, EN_INSTRUCTION):\n return case.owners + [case.contact_dgrtt]\n raise RuntimeError(f'Unknown state: {old_state}')\n\n\nclass Desarchiver(Transition):\n label = 'Désarchiver la demande'\n category = 'danger'\n from_states = [ABANDONNEE, REJETEE, TRAITEE]\n message = '{actor} a désarchivé la demande.'\n\n def apply(self, workflow, data):\n from labster.domain.models.demandes import Demande\n demande = workflow.case\n old_state_id = demande.wf_history[-1].get('old_state', 'EN_EDITION')\n old_state = workflow.get_state_by_id(old_state_id)\n old_state.enter(workflow)\n demande.active = True\n demande.editable = True\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = 
workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return 
WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\nABANDONNER = Abandonner()\nDESARCHIVER = Desarchiver()\nSOUMETTRE = Soumettre()\nPRENDRE_LA_MAIN_GESTIONNAIRE = PrendreLaMainGestionnaire()\nPRENDRE_LA_MAIN_DGRTT = PrendreLaMainDgrtt()\nVALIDER_DIR = ValiderDir()\nREQUERIR_MODIFICATION_DIR = RequerirModificationDir()\nACCUSER_RECEPTION = AccuserReception()\nREQUERIR_MODIFICATION_DGRTT = RequerirModificationDgrtt()\nCONFIRMER_RECEVABILITE_DGRTT = ConfirmerRecevabiliteDgrtt()\nCONFIRMER_FINALISATION_DGRTT = ConfirmerFinalisationDgrtt()\nREJETER_DGRTT = RejeterDgrtt()\nCOMMENTER = Commenter()\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, 
self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\nclass EnEdition(State):\n label = 'En édition'\n next_action = 'Edition à finaliser et à soumettre'\n\n def on_enter(self, workflow):\n case = workflow.case\n case.active = True\n case.editable = True\n\n def task_owners(self, workflow):\n case = workflow.case\n return {u for u in [case.gestionnaire, case.porteur] if u}\n\n\nclass EnValidation(State):\n label = 'En cours de validation hiérarchique'\n label_short = 'En validation'\n next_action = 'Demande à considérer pour validation'\n\n def task_owners(self, workflow):\n demande = workflow.case\n if not demande.wf_stage:\n logger.warning(\n f\"Warning: la demande {demande.id} n'a pas de validation_stage\"\n )\n demande.wf_stage = next_validation_stage(demande)\n return demande.wf_stage.direction\n\n\ndef next_validation_stage(demande):\n from labster.domain.models.unites import DEPARTEMENT, EQUIPE, LABORATOIRE\n structure = demande.structure\n stage = demande.wf_stage\n if not stage:\n stage = structure\n if stage.type == EQUIPE and not structure.wf_must_validate(EQUIPE):\n stage = stage.parent\n if stage.type == DEPARTEMENT and not structure.wf_must_validate(\n DEPARTEMENT):\n stage = stage.parent\n if stage.type == LABORATOIRE and not structure.wf_must_validate(\n LABORATOIRE):\n stage = None\n assert stage is None or stage.type in [EQUIPE, DEPARTEMENT, LABORATOIRE\n ]\n return stage\n stage = stage.parent\n if not stage:\n return None\n if stage.type == DEPARTEMENT and not structure.wf_must_validate(DEPARTEMENT\n ):\n stage = stage.parent\n if not stage:\n return None\n if stage.type == LABORATOIRE and not structure.wf_must_validate(LABORATOIRE\n ):\n stage = None\n if not stage:\n return None\n if stage == None or stage.type in [DEPARTEMENT, LABORATOIRE]:\n return stage\n return None\n\n\nclass EnVerification(State):\n label = 'Recevabilité en cours de vérification'\n label_short = 'En vérification'\n next_action = 'Recevabilité à confirmer'\n\n def on_enter(self, workflow):\n workflow.set_value('ar_envoye', False)\n\n def task_owners(self, workflow):\n case = workflow.case\n if case.contact_dgrtt:\n return [case.contact_dgrtt]\n else:\n return []\n\n\nclass EnInstruction(State):\n label = \"En cours d'instruction par la DR&I\"\n label_short = 'En instruction'\n next_action = 'Instruction à mener et finaliser'\n\n def task_owners(self, workflow):\n case = workflow.case\n if case.contact_dgrtt:\n return [case.contact_dgrtt]\n else:\n return []\n\n\nclass Traitee(State):\n label = 'Traitée par la DR&I'\n label_short = 'Traitée'\n is_final = True\n\n\nclass Rejetee(State):\n label = 'Rejetée par la DR&I'\n label_short = 'Rejetée'\n is_final = True\n\n\nclass Abandonnee(State):\n label = 'Abandonnée par le porteur'\n label_short = 'Abandonnée'\n is_final = True\n\n\n<assignment token>\nACTIVE_STATES: list[State] = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION,\n EN_INSTRUCTION]\nINACTIVE_STATES: list[State] = [TRAITEE, REJETEE, ABANDONNEE]\nALL_STATES: list[State] = ACTIVE_STATES + INACTIVE_STATES\n\n\nclass Abandonner(Transition):\n label = 'Abandonner la demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = ABANDONNEE\n message = '{actor} a abandonné la demande.'\n\n def precondition(self, workflow):\n return workflow.actor_is_porteur_or_gdl()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if old_state == EN_EDITION:\n return []\n if old_state == EN_VALIDATION:\n return 
case.owners + case.structure.direction\n if old_state in (EN_VERIFICATION, EN_INSTRUCTION):\n return case.owners + [case.contact_dgrtt]\n raise RuntimeError(f'Unknown state: {old_state}')\n\n\nclass Desarchiver(Transition):\n label = 'Désarchiver la demande'\n category = 'danger'\n from_states = [ABANDONNEE, REJETEE, TRAITEE]\n message = '{actor} a désarchivé la demande.'\n\n def apply(self, workflow, data):\n from labster.domain.models.demandes import Demande\n demande = workflow.case\n old_state_id = demande.wf_history[-1].get('old_state', 'EN_EDITION')\n old_state = workflow.get_state_by_id(old_state_id)\n old_state.enter(workflow)\n demande.active = True\n demande.editable = True\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n 
case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return 
workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n 
stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\nclass EnEdition(State):\n label = 'En édition'\n next_action = 'Edition à finaliser et à soumettre'\n\n def on_enter(self, workflow):\n case = workflow.case\n case.active = True\n case.editable = True\n\n def task_owners(self, workflow):\n case = workflow.case\n return {u for u in [case.gestionnaire, case.porteur] if u}\n\n\nclass EnValidation(State):\n label = 'En cours de validation hiérarchique'\n label_short = 'En validation'\n next_action = 'Demande à considérer pour validation'\n\n def task_owners(self, workflow):\n demande = workflow.case\n if not demande.wf_stage:\n logger.warning(\n f\"Warning: la demande {demande.id} n'a pas de validation_stage\"\n )\n demande.wf_stage = next_validation_stage(demande)\n return demande.wf_stage.direction\n\n\ndef next_validation_stage(demande):\n from labster.domain.models.unites import DEPARTEMENT, EQUIPE, LABORATOIRE\n structure = demande.structure\n stage = demande.wf_stage\n if not stage:\n stage = structure\n if stage.type == EQUIPE and not structure.wf_must_validate(EQUIPE):\n stage = stage.parent\n if stage.type == DEPARTEMENT and not structure.wf_must_validate(\n DEPARTEMENT):\n stage = stage.parent\n if stage.type == LABORATOIRE and not structure.wf_must_validate(\n LABORATOIRE):\n stage = None\n assert stage is None or stage.type in [EQUIPE, DEPARTEMENT, LABORATOIRE\n ]\n return stage\n stage = stage.parent\n if not stage:\n return None\n if stage.type == DEPARTEMENT and not structure.wf_must_validate(DEPARTEMENT\n ):\n stage = stage.parent\n if not stage:\n return None\n if stage.type == LABORATOIRE and not structure.wf_must_validate(LABORATOIRE\n ):\n stage = None\n if not stage:\n return None\n if stage == None or stage.type in [DEPARTEMENT, LABORATOIRE]:\n return stage\n return None\n\n\nclass EnVerification(State):\n label = 'Recevabilité en cours de vérification'\n label_short = 'En vérification'\n next_action = 'Recevabilité à confirmer'\n\n def on_enter(self, workflow):\n workflow.set_value('ar_envoye', False)\n\n def task_owners(self, workflow):\n case = workflow.case\n if case.contact_dgrtt:\n return [case.contact_dgrtt]\n else:\n return []\n\n\nclass EnInstruction(State):\n label = \"En cours d'instruction par la DR&I\"\n label_short = 'En instruction'\n next_action = 'Instruction à mener et finaliser'\n\n def task_owners(self, workflow):\n case = workflow.case\n if case.contact_dgrtt:\n return [case.contact_dgrtt]\n else:\n return []\n\n\nclass Traitee(State):\n label = 'Traitée par la DR&I'\n label_short = 'Traitée'\n is_final = True\n\n\nclass Rejetee(State):\n label = 'Rejetée par la DR&I'\n label_short = 'Rejetée'\n is_final = True\n\n\nclass Abandonnee(State):\n label = 'Abandonnée par le porteur'\n label_short = 'Abandonnée'\n is_final = True\n\n\n<assignment token>\n<code token>\n\n\nclass Abandonner(Transition):\n label = 'Abandonner la demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = ABANDONNEE\n message = '{actor} a abandonné la demande.'\n\n def precondition(self, workflow):\n return workflow.actor_is_porteur_or_gdl()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if old_state == EN_EDITION:\n return []\n if old_state == EN_VALIDATION:\n return case.owners + case.structure.direction\n if old_state in (EN_VERIFICATION, EN_INSTRUCTION):\n return case.owners + [case.contact_dgrtt]\n raise RuntimeError(f'Unknown state: {old_state}')\n\n\nclass 
Desarchiver(Transition):\n label = 'Désarchiver la demande'\n category = 'danger'\n from_states = [ABANDONNEE, REJETEE, TRAITEE]\n message = '{actor} a désarchivé la demande.'\n\n def apply(self, workflow, data):\n from labster.domain.models.demandes import Demande\n demande = workflow.case\n old_state_id = demande.wf_history[-1].get('old_state', 'EN_EDITION')\n old_state = workflow.get_state_by_id(old_state_id)\n old_state.enter(workflow)\n demande.active = True\n demande.editable = True\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return 
actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = 
workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def 
get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\nclass EnEdition(State):\n label = 'En édition'\n next_action = 'Edition à finaliser et à soumettre'\n\n def on_enter(self, workflow):\n case = workflow.case\n case.active = True\n case.editable = True\n\n def task_owners(self, workflow):\n case = workflow.case\n return {u for u in [case.gestionnaire, case.porteur] if u}\n\n\nclass EnValidation(State):\n label = 'En cours de validation hiérarchique'\n label_short = 'En validation'\n next_action = 'Demande à considérer pour validation'\n\n def task_owners(self, workflow):\n demande = workflow.case\n if not demande.wf_stage:\n logger.warning(\n f\"Warning: la demande {demande.id} n'a pas de validation_stage\"\n )\n demande.wf_stage = next_validation_stage(demande)\n return demande.wf_stage.direction\n\n\n<function token>\n\n\nclass EnVerification(State):\n label = 'Recevabilité en cours de vérification'\n label_short = 'En vérification'\n next_action = 'Recevabilité à confirmer'\n\n def on_enter(self, workflow):\n workflow.set_value('ar_envoye', False)\n\n def task_owners(self, workflow):\n case = workflow.case\n if case.contact_dgrtt:\n return [case.contact_dgrtt]\n else:\n return []\n\n\nclass EnInstruction(State):\n label = \"En cours d'instruction par la DR&I\"\n label_short = 'En instruction'\n next_action = 'Instruction à mener et finaliser'\n\n def task_owners(self, workflow):\n case = workflow.case\n if case.contact_dgrtt:\n return [case.contact_dgrtt]\n else:\n return []\n\n\nclass Traitee(State):\n label = 'Traitée par la DR&I'\n label_short = 'Traitée'\n is_final = True\n\n\nclass Rejetee(State):\n label = 'Rejetée par la DR&I'\n label_short = 'Rejetée'\n is_final = True\n\n\nclass Abandonnee(State):\n label = 'Abandonnée par le porteur'\n label_short = 'Abandonnée'\n is_final = True\n\n\n<assignment token>\n<code token>\n\n\nclass Abandonner(Transition):\n label = 'Abandonner la demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = ABANDONNEE\n message = '{actor} a abandonné la demande.'\n\n def precondition(self, workflow):\n return workflow.actor_is_porteur_or_gdl()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if old_state == EN_EDITION:\n return []\n if old_state == EN_VALIDATION:\n return case.owners + case.structure.direction\n if old_state in (EN_VERIFICATION, EN_INSTRUCTION):\n return case.owners + [case.contact_dgrtt]\n raise RuntimeError(f'Unknown state: {old_state}')\n\n\nclass Desarchiver(Transition):\n label = 'Désarchiver la demande'\n category = 'danger'\n from_states = [ABANDONNEE, REJETEE, TRAITEE]\n message = '{actor} a désarchivé la demande.'\n\n def apply(self, workflow, data):\n from labster.domain.models.demandes import Demande\n demande = workflow.case\n old_state_id = demande.wf_history[-1].get('old_state', 'EN_EDITION')\n old_state = workflow.get_state_by_id(old_state_id)\n old_state.enter(workflow)\n demande.active = True\n demande.editable = True\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and 
workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir 
modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' 
%\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n 
transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\nclass EnEdition(State):\n <assignment token>\n <assignment token>\n\n def on_enter(self, workflow):\n case = workflow.case\n case.active = True\n case.editable = True\n\n def task_owners(self, workflow):\n case = workflow.case\n return {u for u in [case.gestionnaire, case.porteur] if u}\n\n\nclass EnValidation(State):\n label = 'En cours de validation hiérarchique'\n label_short = 'En validation'\n next_action = 'Demande à considérer pour validation'\n\n def task_owners(self, workflow):\n demande = workflow.case\n if not demande.wf_stage:\n logger.warning(\n f\"Warning: la demande {demande.id} n'a pas de validation_stage\"\n )\n demande.wf_stage = next_validation_stage(demande)\n return demande.wf_stage.direction\n\n\n<function token>\n\n\nclass EnVerification(State):\n label = 'Recevabilité en cours de vérification'\n label_short = 'En vérification'\n next_action = 'Recevabilité à confirmer'\n\n def on_enter(self, workflow):\n workflow.set_value('ar_envoye', False)\n\n def task_owners(self, workflow):\n case = workflow.case\n if case.contact_dgrtt:\n return [case.contact_dgrtt]\n else:\n return []\n\n\nclass EnInstruction(State):\n label = \"En cours d'instruction par la DR&I\"\n label_short = 'En instruction'\n next_action = 'Instruction à mener et finaliser'\n\n def task_owners(self, workflow):\n case = workflow.case\n if case.contact_dgrtt:\n return [case.contact_dgrtt]\n else:\n return []\n\n\nclass Traitee(State):\n label = 'Traitée par la DR&I'\n label_short = 'Traitée'\n is_final = True\n\n\nclass Rejetee(State):\n label = 'Rejetée par la DR&I'\n label_short = 'Rejetée'\n is_final = True\n\n\nclass Abandonnee(State):\n label = 'Abandonnée par le porteur'\n label_short = 'Abandonnée'\n is_final = True\n\n\n<assignment token>\n<code token>\n\n\nclass Abandonner(Transition):\n label = 'Abandonner la demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = ABANDONNEE\n message = '{actor} a abandonné la demande.'\n\n def precondition(self, workflow):\n return workflow.actor_is_porteur_or_gdl()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if old_state == EN_EDITION:\n return []\n if old_state == EN_VALIDATION:\n return case.owners + case.structure.direction\n if old_state in (EN_VERIFICATION, EN_INSTRUCTION):\n return case.owners + [case.contact_dgrtt]\n raise RuntimeError(f'Unknown state: {old_state}')\n\n\nclass Desarchiver(Transition):\n label = 'Désarchiver la demande'\n category = 'danger'\n from_states = [ABANDONNEE, REJETEE, TRAITEE]\n message = '{actor} a désarchivé la demande.'\n\n def apply(self, workflow, data):\n from labster.domain.models.demandes import Demande\n demande = workflow.case\n old_state_id = demande.wf_history[-1].get('old_state', 'EN_EDITION')\n old_state = workflow.get_state_by_id(old_state_id)\n old_state.enter(workflow)\n demande.active = True\n demande.editable = True\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n 
permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n 
from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' 
%\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n 
transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\nclass EnEdition(State):\n <assignment token>\n <assignment token>\n <function token>\n\n def task_owners(self, workflow):\n case = workflow.case\n return {u for u in [case.gestionnaire, case.porteur] if u}\n\n\nclass EnValidation(State):\n label = 'En cours de validation hiérarchique'\n label_short = 'En validation'\n next_action = 'Demande à considérer pour validation'\n\n def task_owners(self, workflow):\n demande = workflow.case\n if not demande.wf_stage:\n logger.warning(\n f\"Warning: la demande {demande.id} n'a pas de validation_stage\"\n )\n demande.wf_stage = next_validation_stage(demande)\n return demande.wf_stage.direction\n\n\n<function token>\n\n\nclass EnVerification(State):\n label = 'Recevabilité en cours de vérification'\n label_short = 'En vérification'\n next_action = 'Recevabilité à confirmer'\n\n def on_enter(self, workflow):\n workflow.set_value('ar_envoye', False)\n\n def task_owners(self, workflow):\n case = workflow.case\n if case.contact_dgrtt:\n return [case.contact_dgrtt]\n else:\n return []\n\n\nclass EnInstruction(State):\n label = \"En cours d'instruction par la DR&I\"\n label_short = 'En instruction'\n next_action = 'Instruction à mener et finaliser'\n\n def task_owners(self, workflow):\n case = workflow.case\n if case.contact_dgrtt:\n return [case.contact_dgrtt]\n else:\n return []\n\n\nclass Traitee(State):\n label = 'Traitée par la DR&I'\n label_short = 'Traitée'\n is_final = True\n\n\nclass Rejetee(State):\n label = 'Rejetée par la DR&I'\n label_short = 'Rejetée'\n is_final = True\n\n\nclass Abandonnee(State):\n label = 'Abandonnée par le porteur'\n label_short = 'Abandonnée'\n is_final = True\n\n\n<assignment token>\n<code token>\n\n\nclass Abandonner(Transition):\n label = 'Abandonner la demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = ABANDONNEE\n message = '{actor} a abandonné la demande.'\n\n def precondition(self, workflow):\n return workflow.actor_is_porteur_or_gdl()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if old_state == EN_EDITION:\n return []\n if old_state == EN_VALIDATION:\n return case.owners + case.structure.direction\n if old_state in (EN_VERIFICATION, EN_INSTRUCTION):\n return case.owners + [case.contact_dgrtt]\n raise RuntimeError(f'Unknown state: {old_state}')\n\n\nclass Desarchiver(Transition):\n label = 'Désarchiver la demande'\n category = 'danger'\n from_states = [ABANDONNEE, REJETEE, TRAITEE]\n message = '{actor} a désarchivé la demande.'\n\n def apply(self, workflow, data):\n from labster.domain.models.demandes import Demande\n demande = workflow.case\n old_state_id = demande.wf_history[-1].get('old_state', 'EN_EDITION')\n old_state = workflow.get_state_by_id(old_state_id)\n old_state.enter(workflow)\n demande.active = True\n demande.editable = True\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n 
actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 
'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' 
%\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n 
transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\nclass EnEdition(State):\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n\nclass EnValidation(State):\n label = 'En cours de validation hiérarchique'\n label_short = 'En validation'\n next_action = 'Demande à considérer pour validation'\n\n def task_owners(self, workflow):\n demande = workflow.case\n if not demande.wf_stage:\n logger.warning(\n f\"Warning: la demande {demande.id} n'a pas de validation_stage\"\n )\n demande.wf_stage = next_validation_stage(demande)\n return demande.wf_stage.direction\n\n\n<function token>\n\n\nclass EnVerification(State):\n label = 'Recevabilité en cours de vérification'\n label_short = 'En vérification'\n next_action = 'Recevabilité à confirmer'\n\n def on_enter(self, workflow):\n workflow.set_value('ar_envoye', False)\n\n def task_owners(self, workflow):\n case = workflow.case\n if case.contact_dgrtt:\n return [case.contact_dgrtt]\n else:\n return []\n\n\nclass EnInstruction(State):\n label = \"En cours d'instruction par la DR&I\"\n label_short = 'En instruction'\n next_action = 'Instruction à mener et finaliser'\n\n def task_owners(self, workflow):\n case = workflow.case\n if case.contact_dgrtt:\n return [case.contact_dgrtt]\n else:\n return []\n\n\nclass Traitee(State):\n label = 'Traitée par la DR&I'\n label_short = 'Traitée'\n is_final = True\n\n\nclass Rejetee(State):\n label = 'Rejetée par la DR&I'\n label_short = 'Rejetée'\n is_final = True\n\n\nclass Abandonnee(State):\n label = 'Abandonnée par le porteur'\n label_short = 'Abandonnée'\n is_final = True\n\n\n<assignment token>\n<code token>\n\n\nclass Abandonner(Transition):\n label = 'Abandonner la demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = ABANDONNEE\n message = '{actor} a abandonné la demande.'\n\n def precondition(self, workflow):\n return workflow.actor_is_porteur_or_gdl()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if old_state == EN_EDITION:\n return []\n if old_state == EN_VALIDATION:\n return case.owners + case.structure.direction\n if old_state in (EN_VERIFICATION, EN_INSTRUCTION):\n return case.owners + [case.contact_dgrtt]\n raise RuntimeError(f'Unknown state: {old_state}')\n\n\nclass Desarchiver(Transition):\n label = 'Désarchiver la demande'\n category = 'danger'\n from_states = [ABANDONNEE, REJETEE, TRAITEE]\n message = '{actor} a désarchivé la demande.'\n\n def apply(self, workflow, data):\n from labster.domain.models.demandes import Demande\n demande = workflow.case\n old_state_id = demande.wf_history[-1].get('old_state', 'EN_EDITION')\n old_state = workflow.get_state_by_id(old_state_id)\n old_state.enter(workflow)\n demande.active = True\n demande.editable = True\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n 
structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de 
recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' 
%\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n 
transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass EnValidation(State):\n label = 'En cours de validation hiérarchique'\n label_short = 'En validation'\n next_action = 'Demande à considérer pour validation'\n\n def task_owners(self, workflow):\n demande = workflow.case\n if not demande.wf_stage:\n logger.warning(\n f\"Warning: la demande {demande.id} n'a pas de validation_stage\"\n )\n demande.wf_stage = next_validation_stage(demande)\n return demande.wf_stage.direction\n\n\n<function token>\n\n\nclass EnVerification(State):\n label = 'Recevabilité en cours de vérification'\n label_short = 'En vérification'\n next_action = 'Recevabilité à confirmer'\n\n def on_enter(self, workflow):\n workflow.set_value('ar_envoye', False)\n\n def task_owners(self, workflow):\n case = workflow.case\n if case.contact_dgrtt:\n return [case.contact_dgrtt]\n else:\n return []\n\n\nclass EnInstruction(State):\n label = \"En cours d'instruction par la DR&I\"\n label_short = 'En instruction'\n next_action = 'Instruction à mener et finaliser'\n\n def task_owners(self, workflow):\n case = workflow.case\n if case.contact_dgrtt:\n return [case.contact_dgrtt]\n else:\n return []\n\n\nclass Traitee(State):\n label = 'Traitée par la DR&I'\n label_short = 'Traitée'\n is_final = True\n\n\nclass Rejetee(State):\n label = 'Rejetée par la DR&I'\n label_short = 'Rejetée'\n is_final = True\n\n\nclass Abandonnee(State):\n label = 'Abandonnée par le porteur'\n label_short = 'Abandonnée'\n is_final = True\n\n\n<assignment token>\n<code token>\n\n\nclass Abandonner(Transition):\n label = 'Abandonner la demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = ABANDONNEE\n message = '{actor} a abandonné la demande.'\n\n def precondition(self, workflow):\n return workflow.actor_is_porteur_or_gdl()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if old_state == EN_EDITION:\n return []\n if old_state == EN_VALIDATION:\n return case.owners + case.structure.direction\n if old_state in (EN_VERIFICATION, EN_INSTRUCTION):\n return case.owners + [case.contact_dgrtt]\n raise RuntimeError(f'Unknown state: {old_state}')\n\n\nclass Desarchiver(Transition):\n label = 'Désarchiver la demande'\n category = 'danger'\n from_states = [ABANDONNEE, REJETEE, TRAITEE]\n message = '{actor} a désarchivé la demande.'\n\n def apply(self, workflow, data):\n from labster.domain.models.demandes import Demande\n demande = workflow.case\n old_state_id = demande.wf_history[-1].get('old_state', 'EN_EDITION')\n old_state = workflow.get_state_by_id(old_state_id)\n old_state.enter(workflow)\n demande.active = True\n demande.editable = True\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and 
structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in 
workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' 
%\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n 
transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass EnValidation(State):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def task_owners(self, workflow):\n demande = workflow.case\n if not demande.wf_stage:\n logger.warning(\n f\"Warning: la demande {demande.id} n'a pas de validation_stage\"\n )\n demande.wf_stage = next_validation_stage(demande)\n return demande.wf_stage.direction\n\n\n<function token>\n\n\nclass EnVerification(State):\n label = 'Recevabilité en cours de vérification'\n label_short = 'En vérification'\n next_action = 'Recevabilité à confirmer'\n\n def on_enter(self, workflow):\n workflow.set_value('ar_envoye', False)\n\n def task_owners(self, workflow):\n case = workflow.case\n if case.contact_dgrtt:\n return [case.contact_dgrtt]\n else:\n return []\n\n\nclass EnInstruction(State):\n label = \"En cours d'instruction par la DR&I\"\n label_short = 'En instruction'\n next_action = 'Instruction à mener et finaliser'\n\n def task_owners(self, workflow):\n case = workflow.case\n if case.contact_dgrtt:\n return [case.contact_dgrtt]\n else:\n return []\n\n\nclass Traitee(State):\n label = 'Traitée par la DR&I'\n label_short = 'Traitée'\n is_final = True\n\n\nclass Rejetee(State):\n label = 'Rejetée par la DR&I'\n label_short = 'Rejetée'\n is_final = True\n\n\nclass Abandonnee(State):\n label = 'Abandonnée par le porteur'\n label_short = 'Abandonnée'\n is_final = True\n\n\n<assignment token>\n<code token>\n\n\nclass Abandonner(Transition):\n label = 'Abandonner la demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = ABANDONNEE\n message = '{actor} a abandonné la demande.'\n\n def precondition(self, workflow):\n return workflow.actor_is_porteur_or_gdl()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if old_state == EN_EDITION:\n return []\n if old_state == EN_VALIDATION:\n return case.owners + case.structure.direction\n if old_state in (EN_VERIFICATION, EN_INSTRUCTION):\n return case.owners + [case.contact_dgrtt]\n raise RuntimeError(f'Unknown state: {old_state}')\n\n\nclass Desarchiver(Transition):\n label = 'Désarchiver la demande'\n category = 'danger'\n from_states = [ABANDONNEE, REJETEE, TRAITEE]\n message = '{actor} a désarchivé la demande.'\n\n def apply(self, workflow, data):\n from labster.domain.models.demandes import Demande\n demande = workflow.case\n old_state_id = demande.wf_history[-1].get('old_state', 'EN_EDITION')\n old_state = workflow.get_state_by_id(old_state_id)\n old_state.enter(workflow)\n demande.active = True\n demande.editable = True\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n 
gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = 
workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' 
%\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n 
transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass EnValidation(State):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n\n<function token>\n\n\nclass EnVerification(State):\n label = 'Recevabilité en cours de vérification'\n label_short = 'En vérification'\n next_action = 'Recevabilité à confirmer'\n\n def on_enter(self, workflow):\n workflow.set_value('ar_envoye', False)\n\n def task_owners(self, workflow):\n case = workflow.case\n if case.contact_dgrtt:\n return [case.contact_dgrtt]\n else:\n return []\n\n\nclass EnInstruction(State):\n label = \"En cours d'instruction par la DR&I\"\n label_short = 'En instruction'\n next_action = 'Instruction à mener et finaliser'\n\n def task_owners(self, workflow):\n case = workflow.case\n if case.contact_dgrtt:\n return [case.contact_dgrtt]\n else:\n return []\n\n\nclass Traitee(State):\n label = 'Traitée par la DR&I'\n label_short = 'Traitée'\n is_final = True\n\n\nclass Rejetee(State):\n label = 'Rejetée par la DR&I'\n label_short = 'Rejetée'\n is_final = True\n\n\nclass Abandonnee(State):\n label = 'Abandonnée par le porteur'\n label_short = 'Abandonnée'\n is_final = True\n\n\n<assignment token>\n<code token>\n\n\nclass Abandonner(Transition):\n label = 'Abandonner la demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = ABANDONNEE\n message = '{actor} a abandonné la demande.'\n\n def precondition(self, workflow):\n return workflow.actor_is_porteur_or_gdl()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if old_state == EN_EDITION:\n return []\n if old_state == EN_VALIDATION:\n return case.owners + case.structure.direction\n if old_state in (EN_VERIFICATION, EN_INSTRUCTION):\n return case.owners + [case.contact_dgrtt]\n raise RuntimeError(f'Unknown state: {old_state}')\n\n\nclass Desarchiver(Transition):\n label = 'Désarchiver la demande'\n category = 'danger'\n from_states = [ABANDONNEE, REJETEE, TRAITEE]\n message = '{actor} a désarchivé la demande.'\n\n def apply(self, workflow, data):\n from labster.domain.models.demandes import Demande\n demande = workflow.case\n old_state_id = demande.wf_history[-1].get('old_state', 'EN_EDITION')\n old_state = workflow.get_state_by_id(old_state_id)\n old_state.enter(workflow)\n demande.active = True\n demande.editable = True\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if 
workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n 
return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n\n\nclass EnVerification(State):\n label = 'Recevabilité en cours de vérification'\n label_short = 'En vérification'\n next_action = 'Recevabilité à confirmer'\n\n def on_enter(self, workflow):\n workflow.set_value('ar_envoye', False)\n\n def task_owners(self, workflow):\n case = workflow.case\n if case.contact_dgrtt:\n return [case.contact_dgrtt]\n else:\n return []\n\n\nclass EnInstruction(State):\n label = \"En cours d'instruction par la DR&I\"\n label_short = 'En instruction'\n next_action = 'Instruction à mener et finaliser'\n\n def task_owners(self, workflow):\n case = workflow.case\n if case.contact_dgrtt:\n return [case.contact_dgrtt]\n else:\n return []\n\n\nclass Traitee(State):\n label = 'Traitée par la DR&I'\n label_short = 'Traitée'\n is_final = True\n\n\nclass Rejetee(State):\n label = 'Rejetée par la DR&I'\n label_short = 'Rejetée'\n is_final = True\n\n\nclass Abandonnee(State):\n label = 'Abandonnée par le porteur'\n label_short = 'Abandonnée'\n is_final = True\n\n\n<assignment token>\n<code token>\n\n\nclass Abandonner(Transition):\n label = 'Abandonner la demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = ABANDONNEE\n message = '{actor} a abandonné la demande.'\n\n def precondition(self, workflow):\n return workflow.actor_is_porteur_or_gdl()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if old_state == EN_EDITION:\n return []\n if old_state == EN_VALIDATION:\n return case.owners + case.structure.direction\n if old_state in (EN_VERIFICATION, EN_INSTRUCTION):\n return case.owners + [case.contact_dgrtt]\n raise RuntimeError(f'Unknown state: {old_state}')\n\n\nclass Desarchiver(Transition):\n label = 'Désarchiver la demande'\n category = 'danger'\n from_states = [ABANDONNEE, REJETEE, TRAITEE]\n message = '{actor} a désarchivé la demande.'\n\n def apply(self, workflow, data):\n from labster.domain.models.demandes import Demande\n demande = workflow.case\n old_state_id = demande.wf_history[-1].get('old_state', 'EN_EDITION')\n old_state = workflow.get_state_by_id(old_state_id)\n old_state.enter(workflow)\n demande.active = True\n demande.editable = True\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n 
EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant 
vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n\n\nclass EnVerification(State):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def on_enter(self, workflow):\n workflow.set_value('ar_envoye', False)\n\n def task_owners(self, workflow):\n case = workflow.case\n if case.contact_dgrtt:\n return [case.contact_dgrtt]\n else:\n return []\n\n\nclass EnInstruction(State):\n label = \"En cours d'instruction par la DR&I\"\n label_short = 'En instruction'\n next_action = 'Instruction à mener et finaliser'\n\n def task_owners(self, workflow):\n case = workflow.case\n if case.contact_dgrtt:\n return [case.contact_dgrtt]\n else:\n return []\n\n\nclass Traitee(State):\n label = 'Traitée par la DR&I'\n label_short = 'Traitée'\n is_final = True\n\n\nclass Rejetee(State):\n label = 'Rejetée par la DR&I'\n label_short = 'Rejetée'\n is_final = True\n\n\nclass Abandonnee(State):\n label = 'Abandonnée par le porteur'\n label_short = 'Abandonnée'\n is_final = True\n\n\n<assignment token>\n<code token>\n\n\nclass Abandonner(Transition):\n label = 'Abandonner la demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = ABANDONNEE\n message = '{actor} a abandonné la demande.'\n\n def precondition(self, workflow):\n return workflow.actor_is_porteur_or_gdl()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if old_state == EN_EDITION:\n return []\n if old_state == EN_VALIDATION:\n return case.owners + case.structure.direction\n if old_state in (EN_VERIFICATION, EN_INSTRUCTION):\n return case.owners + [case.contact_dgrtt]\n raise RuntimeError(f'Unknown state: {old_state}')\n\n\nclass Desarchiver(Transition):\n label = 'Désarchiver la demande'\n category = 'danger'\n from_states = [ABANDONNEE, REJETEE, TRAITEE]\n message = '{actor} a désarchivé la demande.'\n\n def apply(self, workflow, data):\n from labster.domain.models.demandes import Demande\n demande = workflow.case\n old_state_id = demande.wf_history[-1].get('old_state', 'EN_EDITION')\n old_state = workflow.get_state_by_id(old_state_id)\n old_state.enter(workflow)\n demande.active = True\n demande.editable = True\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = 
next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n 
to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n\n\nclass EnVerification(State):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def on_enter(self, workflow):\n workflow.set_value('ar_envoye', False)\n <function token>\n\n\nclass EnInstruction(State):\n label = \"En cours d'instruction par la DR&I\"\n label_short = 'En instruction'\n next_action = 'Instruction à mener et finaliser'\n\n def task_owners(self, workflow):\n case = workflow.case\n if case.contact_dgrtt:\n return [case.contact_dgrtt]\n else:\n return []\n\n\nclass Traitee(State):\n label = 'Traitée par la DR&I'\n label_short = 'Traitée'\n is_final = True\n\n\nclass Rejetee(State):\n label = 'Rejetée par la DR&I'\n label_short = 'Rejetée'\n is_final = True\n\n\nclass Abandonnee(State):\n label = 'Abandonnée par le porteur'\n label_short = 'Abandonnée'\n is_final = True\n\n\n<assignment token>\n<code token>\n\n\nclass Abandonner(Transition):\n label = 'Abandonner la demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = ABANDONNEE\n message = '{actor} a abandonné la demande.'\n\n def precondition(self, workflow):\n return workflow.actor_is_porteur_or_gdl()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if old_state == EN_EDITION:\n return []\n if old_state == EN_VALIDATION:\n return case.owners + case.structure.direction\n if old_state in (EN_VERIFICATION, EN_INSTRUCTION):\n return case.owners + [case.contact_dgrtt]\n raise RuntimeError(f'Unknown state: {old_state}')\n\n\nclass Desarchiver(Transition):\n label = 'Désarchiver la demande'\n category = 'danger'\n from_states = [ABANDONNEE, REJETEE, TRAITEE]\n message = '{actor} a désarchivé la demande.'\n\n def apply(self, workflow, data):\n from labster.domain.models.demandes import Demande\n demande = workflow.case\n old_state_id = demande.wf_history[-1].get('old_state', 'EN_EDITION')\n old_state = workflow.get_state_by_id(old_state_id)\n old_state.enter(workflow)\n demande.active = True\n demande.editable = True\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if 
workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def 
precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n\n\nclass EnVerification(State):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n\nclass EnInstruction(State):\n label = \"En cours d'instruction par la DR&I\"\n label_short = 'En instruction'\n next_action = 'Instruction à mener et finaliser'\n\n def task_owners(self, workflow):\n case = workflow.case\n if case.contact_dgrtt:\n return [case.contact_dgrtt]\n else:\n return []\n\n\nclass Traitee(State):\n label = 'Traitée par la DR&I'\n label_short = 'Traitée'\n is_final = True\n\n\nclass Rejetee(State):\n label = 'Rejetée par la DR&I'\n label_short = 'Rejetée'\n is_final = True\n\n\nclass Abandonnee(State):\n label = 'Abandonnée par le porteur'\n label_short = 'Abandonnée'\n is_final = True\n\n\n<assignment token>\n<code token>\n\n\nclass Abandonner(Transition):\n label = 'Abandonner la demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = ABANDONNEE\n message = '{actor} a abandonné la demande.'\n\n def precondition(self, workflow):\n return workflow.actor_is_porteur_or_gdl()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if old_state == EN_EDITION:\n return []\n if old_state == EN_VALIDATION:\n return case.owners + case.structure.direction\n if old_state in (EN_VERIFICATION, EN_INSTRUCTION):\n return case.owners + [case.contact_dgrtt]\n raise RuntimeError(f'Unknown state: {old_state}')\n\n\nclass Desarchiver(Transition):\n label = 'Désarchiver la demande'\n category = 'danger'\n from_states = [ABANDONNEE, REJETEE, TRAITEE]\n message = '{actor} a désarchivé la demande.'\n\n def apply(self, workflow, data):\n from labster.domain.models.demandes import Demande\n demande = workflow.case\n old_state_id = demande.wf_history[-1].get('old_state', 'EN_EDITION')\n old_state = workflow.get_state_by_id(old_state_id)\n old_state.enter(workflow)\n demande.active = True\n demande.editable = True\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande 
sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not 
workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n\n\nclass EnInstruction(State):\n label = \"En cours d'instruction par la DR&I\"\n label_short = 'En instruction'\n next_action = 'Instruction à mener et finaliser'\n\n def task_owners(self, workflow):\n case = workflow.case\n if case.contact_dgrtt:\n return [case.contact_dgrtt]\n else:\n return []\n\n\nclass Traitee(State):\n label = 'Traitée par la DR&I'\n label_short = 'Traitée'\n is_final = True\n\n\nclass Rejetee(State):\n label = 'Rejetée par la DR&I'\n label_short = 'Rejetée'\n is_final = True\n\n\nclass Abandonnee(State):\n label = 'Abandonnée par le porteur'\n label_short = 'Abandonnée'\n is_final = True\n\n\n<assignment token>\n<code token>\n\n\nclass Abandonner(Transition):\n label = 'Abandonner la demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = ABANDONNEE\n message = '{actor} a abandonné la demande.'\n\n def precondition(self, workflow):\n return workflow.actor_is_porteur_or_gdl()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if old_state == EN_EDITION:\n return []\n if old_state == EN_VALIDATION:\n return case.owners + case.structure.direction\n if old_state in (EN_VERIFICATION, EN_INSTRUCTION):\n return case.owners + [case.contact_dgrtt]\n raise RuntimeError(f'Unknown state: {old_state}')\n\n\nclass Desarchiver(Transition):\n label = 'Désarchiver la demande'\n category = 'danger'\n from_states = [ABANDONNEE, REJETEE, TRAITEE]\n message = '{actor} a désarchivé la demande.'\n\n def apply(self, workflow, data):\n from labster.domain.models.demandes import Demande\n demande = workflow.case\n old_state_id = demande.wf_history[-1].get('old_state', 'EN_EDITION')\n old_state = workflow.get_state_by_id(old_state_id)\n old_state.enter(workflow)\n demande.active = True\n demande.editable = True\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def 
get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def 
get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n\n\nclass EnInstruction(State):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def task_owners(self, workflow):\n case = workflow.case\n if case.contact_dgrtt:\n return [case.contact_dgrtt]\n else:\n return []\n\n\nclass Traitee(State):\n label = 'Traitée par la DR&I'\n label_short = 'Traitée'\n is_final = True\n\n\nclass Rejetee(State):\n label = 'Rejetée par la DR&I'\n label_short = 'Rejetée'\n is_final = True\n\n\nclass Abandonnee(State):\n label = 'Abandonnée par le porteur'\n label_short = 'Abandonnée'\n is_final = True\n\n\n<assignment token>\n<code token>\n\n\nclass Abandonner(Transition):\n label = 'Abandonner la demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = ABANDONNEE\n message = '{actor} a abandonné la demande.'\n\n def precondition(self, workflow):\n return workflow.actor_is_porteur_or_gdl()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if old_state == EN_EDITION:\n return []\n if old_state == EN_VALIDATION:\n return case.owners + case.structure.direction\n if old_state in (EN_VERIFICATION, EN_INSTRUCTION):\n return case.owners + [case.contact_dgrtt]\n raise RuntimeError(f'Unknown state: {old_state}')\n\n\nclass Desarchiver(Transition):\n label = 'Désarchiver la demande'\n category = 'danger'\n from_states = [ABANDONNEE, REJETEE, TRAITEE]\n message = '{actor} a désarchivé la demande.'\n\n def apply(self, workflow, data):\n from labster.domain.models.demandes import Demande\n demande = workflow.case\n old_state_id = demande.wf_history[-1].get('old_state', 'EN_EDITION')\n old_state = workflow.get_state_by_id(old_state_id)\n old_state.enter(workflow)\n demande.active = True\n demande.editable = True\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n 
if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n 
return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n\n\nclass EnInstruction(State):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass Traitee(State):\n label = 'Traitée par la DR&I'\n label_short = 'Traitée'\n is_final = True\n\n\nclass Rejetee(State):\n label = 'Rejetée par la DR&I'\n label_short = 'Rejetée'\n is_final = True\n\n\nclass Abandonnee(State):\n label = 'Abandonnée par le porteur'\n label_short = 'Abandonnée'\n is_final = True\n\n\n<assignment token>\n<code token>\n\n\nclass Abandonner(Transition):\n label = 'Abandonner la demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = ABANDONNEE\n message = '{actor} a abandonné la demande.'\n\n def precondition(self, workflow):\n return workflow.actor_is_porteur_or_gdl()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if old_state == EN_EDITION:\n return []\n if old_state == EN_VALIDATION:\n return case.owners + case.structure.direction\n if old_state in (EN_VERIFICATION, EN_INSTRUCTION):\n return case.owners + [case.contact_dgrtt]\n raise RuntimeError(f'Unknown state: {old_state}')\n\n\nclass Desarchiver(Transition):\n label = 'Désarchiver la demande'\n category = 'danger'\n from_states = [ABANDONNEE, REJETEE, TRAITEE]\n message = '{actor} a désarchivé la demande.'\n\n def apply(self, workflow, data):\n from labster.domain.models.demandes import Demande\n demande = workflow.case\n old_state_id = demande.wf_history[-1].get('old_state', 'EN_EDITION')\n old_state = workflow.get_state_by_id(old_state_id)\n old_state.enter(workflow)\n demande.active = True\n demande.editable = True\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return 
case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir 
modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n\n\nclass Traitee(State):\n label = 'Traitée par la DR&I'\n label_short = 'Traitée'\n is_final = True\n\n\nclass Rejetee(State):\n label = 'Rejetée par la DR&I'\n label_short = 'Rejetée'\n is_final = True\n\n\nclass Abandonnee(State):\n label = 'Abandonnée par le porteur'\n label_short = 'Abandonnée'\n is_final = True\n\n\n<assignment token>\n<code token>\n\n\nclass Abandonner(Transition):\n label = 'Abandonner la demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = ABANDONNEE\n message = '{actor} a abandonné la demande.'\n\n def precondition(self, workflow):\n return workflow.actor_is_porteur_or_gdl()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if old_state == EN_EDITION:\n return []\n if old_state == EN_VALIDATION:\n return case.owners + case.structure.direction\n if old_state in (EN_VERIFICATION, EN_INSTRUCTION):\n return case.owners + [case.contact_dgrtt]\n raise RuntimeError(f'Unknown state: {old_state}')\n\n\nclass Desarchiver(Transition):\n label = 'Désarchiver la demande'\n category = 'danger'\n from_states = [ABANDONNEE, REJETEE, TRAITEE]\n message = '{actor} a désarchivé la demande.'\n\n def apply(self, workflow, data):\n from labster.domain.models.demandes import Demande\n demande = workflow.case\n old_state_id = demande.wf_history[-1].get('old_state', 'EN_EDITION')\n old_state = workflow.get_state_by_id(old_state_id)\n old_state.enter(workflow)\n demande.active = True\n demande.editable = True\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return 
WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def 
precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n\n\nclass Traitee(State):\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass Rejetee(State):\n label = 'Rejetée par la DR&I'\n label_short = 'Rejetée'\n is_final = True\n\n\nclass Abandonnee(State):\n label = 'Abandonnée par le porteur'\n label_short = 'Abandonnée'\n is_final = True\n\n\n<assignment token>\n<code token>\n\n\nclass Abandonner(Transition):\n label = 'Abandonner la demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = ABANDONNEE\n message = '{actor} a abandonné la demande.'\n\n def precondition(self, workflow):\n return workflow.actor_is_porteur_or_gdl()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if old_state == EN_EDITION:\n return []\n if old_state == EN_VALIDATION:\n return case.owners + case.structure.direction\n if old_state in (EN_VERIFICATION, EN_INSTRUCTION):\n return case.owners + [case.contact_dgrtt]\n raise RuntimeError(f'Unknown state: {old_state}')\n\n\nclass Desarchiver(Transition):\n label = 'Désarchiver la demande'\n category = 'danger'\n from_states = [ABANDONNEE, REJETEE, TRAITEE]\n message = '{actor} a désarchivé la demande.'\n\n def apply(self, workflow, data):\n from labster.domain.models.demandes import Demande\n demande = workflow.case\n old_state_id = demande.wf_history[-1].get('old_state', 'EN_EDITION')\n old_state = workflow.get_state_by_id(old_state_id)\n old_state.enter(workflow)\n demande.active = True\n demande.editable = True\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return 
WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def 
precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass Rejetee(State):\n label = 'Rejetée par la DR&I'\n label_short = 'Rejetée'\n is_final = True\n\n\nclass Abandonnee(State):\n label = 'Abandonnée par le porteur'\n label_short = 'Abandonnée'\n is_final = True\n\n\n<assignment token>\n<code token>\n\n\nclass Abandonner(Transition):\n label = 'Abandonner la demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = ABANDONNEE\n message = '{actor} a abandonné la demande.'\n\n def precondition(self, workflow):\n return workflow.actor_is_porteur_or_gdl()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if old_state == EN_EDITION:\n return []\n if old_state == EN_VALIDATION:\n return case.owners + case.structure.direction\n if old_state in (EN_VERIFICATION, EN_INSTRUCTION):\n return case.owners + [case.contact_dgrtt]\n raise RuntimeError(f'Unknown state: {old_state}')\n\n\nclass Desarchiver(Transition):\n label = 'Désarchiver la demande'\n category = 'danger'\n from_states = [ABANDONNEE, REJETEE, TRAITEE]\n message = '{actor} a désarchivé la demande.'\n\n def apply(self, workflow, data):\n from labster.domain.models.demandes import Demande\n demande = workflow.case\n old_state_id = demande.wf_history[-1].get('old_state', 'EN_EDITION')\n old_state = workflow.get_state_by_id(old_state_id)\n old_state.enter(workflow)\n demande.active = True\n demande.editable = True\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass 
AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def 
apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass Rejetee(State):\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass Abandonnee(State):\n label = 'Abandonnée par le porteur'\n label_short = 'Abandonnée'\n is_final = True\n\n\n<assignment token>\n<code token>\n\n\nclass Abandonner(Transition):\n label = 'Abandonner la demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = ABANDONNEE\n message = '{actor} a abandonné la demande.'\n\n def precondition(self, workflow):\n return workflow.actor_is_porteur_or_gdl()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if old_state == EN_EDITION:\n return []\n if old_state == EN_VALIDATION:\n return case.owners + case.structure.direction\n if old_state in (EN_VERIFICATION, EN_INSTRUCTION):\n return case.owners + [case.contact_dgrtt]\n raise RuntimeError(f'Unknown state: {old_state}')\n\n\nclass Desarchiver(Transition):\n label = 'Désarchiver la demande'\n category = 'danger'\n from_states = [ABANDONNEE, REJETEE, TRAITEE]\n message = '{actor} a désarchivé la demande.'\n\n def apply(self, workflow, data):\n from labster.domain.models.demandes import Demande\n demande = workflow.case\n old_state_id = demande.wf_history[-1].get('old_state', 'EN_EDITION')\n old_state = workflow.get_state_by_id(old_state_id)\n old_state.enter(workflow)\n demande.active = True\n demande.editable = True\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass 
AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def 
apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Abandonnee(State):\n label = 'Abandonnée par le porteur'\n label_short = 'Abandonnée'\n is_final = True\n\n\n<assignment token>\n<code token>\n\n\nclass Abandonner(Transition):\n label = 'Abandonner la demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = ABANDONNEE\n message = '{actor} a abandonné la demande.'\n\n def precondition(self, workflow):\n return workflow.actor_is_porteur_or_gdl()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if old_state == EN_EDITION:\n return []\n if old_state == EN_VALIDATION:\n return case.owners + case.structure.direction\n if old_state in (EN_VERIFICATION, EN_INSTRUCTION):\n return case.owners + [case.contact_dgrtt]\n raise RuntimeError(f'Unknown state: {old_state}')\n\n\nclass Desarchiver(Transition):\n label = 'Désarchiver la demande'\n category = 'danger'\n from_states = [ABANDONNEE, REJETEE, TRAITEE]\n message = '{actor} a désarchivé la demande.'\n\n def apply(self, workflow, data):\n from labster.domain.models.demandes import Demande\n demande = workflow.case\n old_state_id = demande.wf_history[-1].get('old_state', 'EN_EDITION')\n old_state = workflow.get_state_by_id(old_state_id)\n old_state.enter(workflow)\n demande.active = True\n demande.editable = True\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n 
message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', 
False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Abandonnee(State):\n <assignment token>\n <assignment token>\n <assignment token>\n\n\n<assignment token>\n<code token>\n\n\nclass Abandonner(Transition):\n label = 'Abandonner la demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = ABANDONNEE\n message = '{actor} a abandonné la demande.'\n\n def precondition(self, workflow):\n return workflow.actor_is_porteur_or_gdl()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if old_state == EN_EDITION:\n return []\n if old_state == EN_VALIDATION:\n return case.owners + case.structure.direction\n if old_state in (EN_VERIFICATION, EN_INSTRUCTION):\n return case.owners + [case.contact_dgrtt]\n raise RuntimeError(f'Unknown state: {old_state}')\n\n\nclass Desarchiver(Transition):\n label = 'Désarchiver la demande'\n category = 'danger'\n from_states = [ABANDONNEE, REJETEE, TRAITEE]\n message = '{actor} a désarchivé la demande.'\n\n def apply(self, workflow, data):\n from labster.domain.models.demandes import Demande\n demande = workflow.case\n old_state_id = demande.wf_history[-1].get('old_state', 'EN_EDITION')\n old_state = workflow.get_state_by_id(old_state_id)\n old_state.enter(workflow)\n demande.active = True\n demande.editable = True\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a 
pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, 
workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n\n\nclass Abandonner(Transition):\n label = 'Abandonner la demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = ABANDONNEE\n message = '{actor} a abandonné la demande.'\n\n def precondition(self, workflow):\n return workflow.actor_is_porteur_or_gdl()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if old_state == EN_EDITION:\n return []\n if old_state == EN_VALIDATION:\n return case.owners + case.structure.direction\n if old_state in (EN_VERIFICATION, EN_INSTRUCTION):\n return case.owners + [case.contact_dgrtt]\n raise RuntimeError(f'Unknown state: {old_state}')\n\n\nclass Desarchiver(Transition):\n label = 'Désarchiver la demande'\n category = 'danger'\n from_states = [ABANDONNEE, REJETEE, TRAITEE]\n message = '{actor} a désarchivé la demande.'\n\n def apply(self, workflow, data):\n from labster.domain.models.demandes import Demande\n demande = workflow.case\n old_state_id = demande.wf_history[-1].get('old_state', 'EN_EDITION')\n old_state = workflow.get_state_by_id(old_state_id)\n old_state.enter(workflow)\n demande.active = True\n demande.editable = True\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, 
old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande 
de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n\n\nclass Abandonner(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def precondition(self, workflow):\n return workflow.actor_is_porteur_or_gdl()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if old_state == EN_EDITION:\n return []\n if old_state == EN_VALIDATION:\n return case.owners + case.structure.direction\n if old_state in (EN_VERIFICATION, EN_INSTRUCTION):\n return case.owners + [case.contact_dgrtt]\n raise RuntimeError(f'Unknown state: {old_state}')\n\n\nclass Desarchiver(Transition):\n label = 'Désarchiver la demande'\n category = 'danger'\n from_states = [ABANDONNEE, REJETEE, TRAITEE]\n message = '{actor} a désarchivé la demande.'\n\n def apply(self, workflow, data):\n from labster.domain.models.demandes import Demande\n demande = workflow.case\n old_state_id = demande.wf_history[-1].get('old_state', 'EN_EDITION')\n old_state = workflow.get_state_by_id(old_state_id)\n old_state.enter(workflow)\n demande.active = True\n demande.editable = True\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = 
workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n 
else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n\n\nclass Abandonner(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if old_state == EN_EDITION:\n return []\n if old_state == EN_VALIDATION:\n return case.owners + case.structure.direction\n if old_state in (EN_VERIFICATION, EN_INSTRUCTION):\n return case.owners + [case.contact_dgrtt]\n raise RuntimeError(f'Unknown state: {old_state}')\n\n\nclass Desarchiver(Transition):\n label = 'Désarchiver la demande'\n category = 'danger'\n from_states = [ABANDONNEE, REJETEE, TRAITEE]\n message = '{actor} a désarchivé la demande.'\n\n def apply(self, workflow, data):\n from labster.domain.models.demandes import Demande\n demande = workflow.case\n old_state_id = demande.wf_history[-1].get('old_state', 'EN_EDITION')\n old_state = workflow.get_state_by_id(old_state_id)\n old_state.enter(workflow)\n demande.active = True\n demande.editable = True\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in 
case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par 
{actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n\n\nclass Abandonner(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n\nclass Desarchiver(Transition):\n label = 'Désarchiver la demande'\n category = 'danger'\n from_states = [ABANDONNEE, REJETEE, TRAITEE]\n message = '{actor} a désarchivé la demande.'\n\n def apply(self, workflow, data):\n from labster.domain.models.demandes import Demande\n demande = workflow.case\n old_state_id = demande.wf_history[-1].get('old_state', 'EN_EDITION')\n old_state = workflow.get_state_by_id(old_state_id)\n old_state.enter(workflow)\n demande.active = True\n demande.editable = True\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = 
workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return 
WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n\n\nclass Desarchiver(Transition):\n label = 'Désarchiver la demande'\n category = 'danger'\n from_states = [ABANDONNEE, REJETEE, TRAITEE]\n message = '{actor} a désarchivé la demande.'\n\n def apply(self, workflow, data):\n from labster.domain.models.demandes import Demande\n demande = workflow.case\n old_state_id = demande.wf_history[-1].get('old_state', 'EN_EDITION')\n old_state = workflow.get_state_by_id(old_state_id)\n old_state.enter(workflow)\n demande.active = True\n demande.editable = True\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = 
workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def 
precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for 
directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n\n\nclass Desarchiver(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def apply(self, workflow, data):\n from labster.domain.models.demandes import Demande\n demande = workflow.case\n old_state_id = demande.wf_history[-1].get('old_state', 'EN_EDITION')\n old_state = workflow.get_state_by_id(old_state_id)\n old_state.enter(workflow)\n demande.active = True\n demande.editable = True\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass 
PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def 
apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in 
demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n\n\nclass Desarchiver(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def apply(self, workflow, data):\n from labster.domain.models.demandes import Demande\n demande = workflow.case\n old_state_id = demande.wf_history[-1].get('old_state', 'EN_EDITION')\n old_state = workflow.get_state_by_id(old_state_id)\n old_state.enter(workflow)\n demande.active = True\n demande.editable = True\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n <function token>\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def 
precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n 
self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor 
= Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n\n\nclass Desarchiver(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n <function token>\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n 
label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 
'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = 
workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n\n\nclass Desarchiver(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n 
message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, 
old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': 
workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n\n\nclass Soumettre(Transition):\n label = 'Soumettre la demande'\n from_states = [EN_EDITION]\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n 
def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n 
demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment 
token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n\n\nclass Soumettre(Transition):\n <assignment token>\n <assignment token>\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n\n def message(self, workflow):\n if workflow.get_value('validee_hierarchie'):\n return (\n '{actor} a resoumis sa demande sans revalidation hiérarchique')\n else:\n return '{actor} a soumis sa demande pour validation hiérarchique.'\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, 
workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = 
workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass 
LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n\n\nclass Soumettre(Transition):\n <assignment token>\n <assignment token>\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n <function token>\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if workflow.get_value('validee_hierarchie'):\n return [case.contact_dgrtt]\n else:\n return case.wf_stage.direction\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n 
demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' 
%\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n 
transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n\n\nclass Soumettre(Transition):\n <assignment token>\n <assignment token>\n\n def precondition(self, workflow):\n demande = workflow.case\n structure = demande.structure\n if not demande.is_valid():\n return False\n if workflow.actor == demande.porteur:\n return True\n if (workflow.actor.structure and workflow.actor.structure.\n permettre_soummission_directe and workflow.actor in workflow.\n actor.structure.get_gestionnaires()):\n return True\n if (structure.permettre_soummission_directe and workflow.actor in\n structure.get_gestionnaires()):\n return True\n if (workflow.get_value('validee_hierarchie') and structure.\n permettre_reponse_directe and workflow.actor == demande.\n gestionnaire):\n return True\n return False\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n <function token>\n <function token>\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if 
case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' 
%\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n 
transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n\n\nclass Soumettre(Transition):\n <assignment token>\n <assignment token>\n <function token>\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.editable = False\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n if workflow.get_value('validee_hierarchie'):\n if workflow.get_value('recevable'):\n EN_INSTRUCTION.enter(workflow)\n else:\n EN_VERIFICATION.enter(workflow)\n else:\n demande.wf_stage = next_validation_stage(demande)\n EN_VALIDATION.enter(workflow)\n <function token>\n <function token>\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def 
get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' 
%\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n 
transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n\n\nclass Soumettre(Transition):\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True)\n return WorkflowForm()\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def 
precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n\n\nclass Soumettre(Transition):\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, 
workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n\n\nclass AbstractPrendreLaMain(Transition):\n label = 'Prendre la main sur la demande'\n category = 'danger'\n message = '{actor} a pris la main sur la demande.'\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return 
case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n\n\nclass AbstractPrendreLaMain(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def get_users_to_notify(self, workflow, old_state):\n from labster.domain.models.profiles import Profile\n case = workflow.case\n actor = workflow.actor\n user_ids = {entry['actor_id'] for entry in case.wf_history}\n users = []\n for id in user_ids:\n try:\n user = Profile.query.get(id)\n if user != actor:\n users.append(user)\n except Exception:\n pass\n return users\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n 
label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n\n\nclass AbstractPrendreLaMain(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if 
workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour 
vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n <assignment token>\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('gestionnaire', case) and actor not in (case.\n gestionnaire, case.porteur)\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour 
vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n <assignment token>\n <function token>\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.gestionnaire = actor\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, 
workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass PrendreLaMainGestionnaire(AbstractPrendreLaMain):\n <assignment token>\n <function token>\n <function token>\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, 
workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n from_states = ACTIVE_STATES\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, 
require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n <assignment token>\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n\n def apply(self, workflow, data):\n case = workflow.case\n actor = workflow.actor\n case.contact_dgrtt = actor\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, 
require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n <assignment token>\n\n def precondition(self, workflow):\n case = workflow.case\n actor = workflow.actor\n return actor.has_role('dgrtt') and not actor == case.contact_dgrtt\n <function token>\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass 
ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass PrendreLaMainDgrtt(AbstractPrendreLaMain):\n <assignment token>\n <function token>\n <function token>\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, 
workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in 
directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ValiderDir(Transition):\n label = 'Valider la demande'\n from_states = [EN_VALIDATION]\n message = 'Demande validée par la hiérarchie ({actor}).'\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n 
workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = 
history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ValiderDir(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n 
self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor 
= Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ValiderDir(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.assigne_contact_dgrtt()\n workflow.set_value('validee_hierarchie', True)\n demande.date_effective = date.today()\n next_stage = next_validation_stage(demande)\n demande.wf_stage = next_stage\n if not next_stage:\n EN_VERIFICATION.enter(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 
'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, 
**kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ValiderDir(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n if case.contact_dgrtt:\n return case.owners + [case.contact_dgrtt]\n else:\n return case.owners\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return 
case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un 
commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ValiderDir(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 
'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass 
LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RequerirModificationDir(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VALIDATION]\n to_state = EN_EDITION\n message = (\n 'Demande de compléments / modifications par {actor} (direction labo/département/équipe) pour vérification de recevabilité.'\n )\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' 
%\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n 
transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RequerirModificationDir(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' 
%\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n 
transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RequerirModificationDir(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def precondition(self, workflow):\n return workflow.actor in workflow.state.task_owners(workflow)\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n <function token>\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' 
%\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n 
transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RequerirModificationDir(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def apply(self, workflow, data):\n demande = workflow.case\n demande.wf_stage = None\n workflow.set_value('validee_hierarchie', False)\n <function token>\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' 
%\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n 
transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RequerirModificationDir(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' 
%\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n 
transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RequerirModificationDir(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' 
%\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n 
transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass AccuserReception(Transition):\n label = 'Accuser réception en attendant vérification ultérieure'\n from_states = [EN_VERIFICATION]\n to_state = EN_VERIFICATION\n message = 'Accusé de réception envoyé par {actor} (contact).'\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' 
%\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n 
transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass AccuserReception(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt() and not workflow.get_value(\n 'ar_envoye', False)\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' 
%\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n 
transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass AccuserReception(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def apply(self, workflow, data):\n workflow.set_value('ar_envoye', True)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass AccuserReception(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass AccuserReception(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RequerirModificationDgrtt(Transition):\n label = 'Requérir modification / complément'\n from_states = [EN_VERIFICATION, EN_INSTRUCTION]\n to_state = EN_EDITION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RequerirModificationDgrtt(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n\n def message(self, workflow):\n if workflow.state == EN_VERIFICATION:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour vérification de recevabilité.'\n )\n else:\n return (\n 'Demande de compléments / modifications par {actor} (contact) pour instruction.'\n )\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RequerirModificationDgrtt(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n <function token>\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RequerirModificationDgrtt(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n if data.get('resoumission'):\n workflow.set_value('validee_hierarchie', False)\n <function token>\n <function token>\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RequerirModificationDgrtt(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n <function token>\n <function token>\n <function token>\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RequerirModificationDgrtt(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def get_form(self, workflow, **kw):\n if workflow.get_value('validee_hierarchie'):\n return WorkflowForm(ask_for_revalidation=True, require_note=True)\n return WorkflowForm(require_note=True)\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RequerirModificationDgrtt(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n label = 'Confirmer recevabilité'\n from_states = [EN_VERIFICATION]\n to_state = EN_INSTRUCTION\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def apply(self, workflow, data):\n workflow.set_value('recevable', True)\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n <function token>\n\n def send_notification(self, workflow):\n case = workflow.case\n subject = 'Recevabilité de votre demande par la DR&I'\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n send_email(case.owners, subject, 'notif-demande-recevable.html', ctx)\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n <function token>\n <function token>\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n\n def message(self, workflow):\n demande = workflow.case\n tpl = (\n 'Recevabilité confirmée par {actor} (contact). No Infolab: %s.' %\n demande.no_infolab)\n return tpl\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' 
%\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n <function token>\n <function token>\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n <function token>\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n 
stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n <function token>\n <function token>\n <function token>\n\n def get_form(self, workflow, **kw):\n return ConfirmerRecevabiliteForm()\n <function token>\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = 
history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = 
Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ConfirmerRecevabiliteDgrtt(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return 
stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n label = 'Confirmer finalisation'\n from_states = [EN_INSTRUCTION]\n to_state = TRAITEE\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = 
workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def get_form(self, workflow, **kw):\n return ConfirmerFinalisationForm()\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = 
{'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n <function token>\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Finalisation de votre demande par la DR&I'\n du = case.laboratoire.direction\n recipients = case.owners + du\n send_email(recipients, subject, 'notif-demande-finalisee.html', ctx)\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': 
datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def precondition(self, workflow):\n return workflow.actor_is_contact_dgrtt()\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n <function token>\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n <function token>\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n 
PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n <function token>\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n <function token>\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n 
CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n <function token>\n\n def message(self, workflow):\n demande = workflow.case\n tpl = ('Traitement finalisé par {actor} (contact).No eOTP: %s.' %\n demande.no_eotp)\n return tpl\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n 
REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n <function token>\n <function token>\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == 
self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ConfirmerFinalisationDgrtt(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return 
self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RejeterDgrtt(Transition):\n label = 'Rejeter / abandonner demande'\n category = 'danger'\n from_states = [EN_EDITION, EN_VALIDATION, EN_VERIFICATION, EN_INSTRUCTION]\n to_state = REJETEE\n message = 'Demande rejetée / abandonnées par {actor} (contact).'\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RejeterDgrtt(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n\n def get_users_to_notify(self, workflow, old_state):\n case = workflow.case\n return case.owners\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RejeterDgrtt(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def precondition(self, workflow):\n actor = workflow.actor\n return actor.has_role('dgrtt')\n <function token>\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RejeterDgrtt(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RejeterDgrtt(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Rejet de votre demande par la DR&I'\n send_email(case.owners, subject, 'notif-demande-rejetee.html', ctx)\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RejeterDgrtt(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RejeterDgrtt(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Commenter(Transition):\n label = 'Envoyer un message'\n category = 'success'\n from_states = [EN_EDITION, EN_VERIFICATION, EN_INSTRUCTION]\n message = '{actor} a posté le commentaire ou la question suivante: '\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Commenter(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def precondition(self, workflow):\n actor = workflow.actor\n demande = workflow.case\n if workflow.state == EN_EDITION and not demande.contact_dgrtt:\n return False\n return actor in self._get_stakeholder(workflow)\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Commenter(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n\n def _get_stakeholder(self, workflow):\n from .profiles import Profile\n from .unites import OrgUnit\n demande = workflow.case\n structure = demande.structure\n stakeholders = set(demande.owners)\n if demande.contact_dgrtt:\n stakeholders.add(demande.contact_dgrtt)\n directeurs = structure.get_directeurs()\n for directeur in directeurs:\n stakeholders.add(directeur)\n for history_item in demande.wf_history:\n actor_id = history_item['actor_id']\n if actor_id:\n actor = Profile.query.get(actor_id)\n stakeholders.add(actor)\n return stakeholders\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Commenter(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def get_users_to_notify(self, workflow, old_state):\n return self._get_stakeholder(workflow)\n <function token>\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Commenter(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n def get_form(self, workflow, **kw):\n return WorkflowForm(require_note=True)\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Commenter(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n\n def send_notification(self, workflow):\n case = workflow.case\n ctx = {'transition': self, 'demande': case, 'workflow': workflow,\n 'now': datetime.now()}\n subject = 'Un commentaire sur votre demande'\n send_email(case.owners, subject, 'notif-demande-comment.html', ctx)\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Commenter(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def apply(self, workflow, data):\n self.send_notification(workflow)\n <function token>\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Commenter(Transition):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n initial_state = EN_EDITION\n states = ALL_STATES\n transitions = [SOUMETTRE, PRENDRE_LA_MAIN_GESTIONNAIRE, VALIDER_DIR,\n PRENDRE_LA_MAIN_DGRTT, REQUERIR_MODIFICATION_DIR, ACCUSER_RECEPTION,\n CONFIRMER_RECEVABILITE_DGRTT, CONFIRMER_FINALISATION_DGRTT,\n REQUERIR_MODIFICATION_DGRTT, REJETER_DGRTT, ABANDONNER, DESARCHIVER,\n COMMENTER]\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def actor_is_contact_dgrtt(self):\n return self.actor == self.case.contact_dgrtt\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def actor_is_porteur_or_gdl(self):\n return self.actor in (self.case.porteur, self.case.gestionnaire)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n\n\nclass LabsterWorkflow(Workflow):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<class token>\n"
] | false |
98,617 |
4adcd2fbfab9ad148ae35a345f09d23dc8389035
|
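# Expected values for subinterface GigabitEthernet0/0/0/1.501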
expected_output = {
"GigabitEthernet0/0/0/1.501": {
"status": "up",
"outer_vlan": ".1Q:501",
"vlan_id": "501",
"mtu": 1518,
"rewrite_num_of_tags_pop": 1,
"rewrite_num_of_tags_push": 0,
}
}
|
[
"expected_output = {\n \"GigabitEthernet0/0/0/1.501\": {\n \"status\": \"up\",\n \"outer_vlan\": \".1Q:501\",\n \"vlan_id\": \"501\",\n \"mtu\": 1518,\n \"rewrite_num_of_tags_pop\": 1,\n \"rewrite_num_of_tags_push\": 0,\n }\n }",
"expected_output = {'GigabitEthernet0/0/0/1.501': {'status': 'up',\n 'outer_vlan': '.1Q:501', 'vlan_id': '501', 'mtu': 1518,\n 'rewrite_num_of_tags_pop': 1, 'rewrite_num_of_tags_push': 0}}\n",
"<assignment token>\n"
] | false |
98,618 |
d58c5a9d6dc09b0604f5985d080417432aabb0aa
|
print("Hello text")
print(7)
greeting = "Hello"
print(greeting)
# greeting2 = input("Write a greeting: ")
# print(greeting2)
a = 2
b = 3
print(a + b)
print(type(2))
# age = input("Enter your age: ")
# new_age = int(age) + 50
# print(new_age)
print(3**2)
newgret = greeting.replace("e", "i")
newgret = greeting.replace("e", "i", 1)
print(newgret)
# print(dir(newgret))
print(newgret[0])
print(newgret[-1])
print(newgret[0:3])
print(newgret[:4])
print(newgret[3:])
# List mutable
c = ["H", 2, "Hello"]
print(c)
print(type(c[1]))
c.append(4)
print(c)
c.remove("H")
print(c)
# Tuple not mutable
t = ("Hello", 2 , 4.6)
print(t)
# Dictionary unordered
d = {"Name" : "John", "Surname" : "Smith", "Cities": ("Porto", "San diego", "Bali")}
print(d)
print(d["Name"])
print(d["Cities"][1])
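# Function definition with a default parameter (minutes defaults to 70)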
def minutes_to_hours(seconds, minutes=70):
hours = (minutes / 60.0) + (seconds / 3600)
return hours
print(minutes_to_hours(300))
# def age_foo(age):
# new_age = float(age) + 50
# return new_age
# age = input("Enter your age: ")
# if age < 150:
# print(age_foo(age))
# else:
# print("How it is possible?")
a = 5
if a < 5:
print("less than 5")
elif a == 5:
print("equal to 5")
else:
print("equal or greater than 5")
emails = ['[email protected]', '[email protected]', '[email protected]']
for email in emails:
if 'gmail' in email:
print(email)
# def currency_converter(rate, euros):
# dollars = euros * rate
# return dollars
# r = input("Enter rate: ")
# e = input("Enter euros: ")
# print(currency_converter(r, e))
# password = ""
# while password != "python123":
# password = input("Enter password: ")
# if password == 'python123':
# print("You are logged in!")
# else:
# print("Sorry, try again!")
names=['james', 'john', 'jack']
email_domains=['gmail', 'yahoo']
for i,j in zip(names, email_domains):
print(i, j)
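# Basic file handling: read(), readlines(), write, append and the with statement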
file = open("example.txt", "r")
content = file.read()
print(content)
file.seek(0)
content2 = file.readlines()
print(content2)
content2 = [i.rstrip("\n") for i in content2]
print(content2)
file = open("example.txt", "w")
file.write("Line 1\n")
file.write("Line 2\n")
for i in range(10):
file.write("Line " + str(i) + "\n")
file = open("example.txt", "a")
file.write("Line 11\n")
file.close()
with open("example.txt", "a+") as file:
file.seek(0)
content = file.read()
print(content)
import os
print(os.listdir("."))
print(os.__file__)
"""
This script creates an empty file
"""
def create_file():
"""
This function creates an empty file
"""
with open("example.txt", "w") as file:
file.write("")
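# Dates and times with the datetime module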
import datetime
print(datetime.datetime.now())
print(datetime.datetime(2016, 5, 13, 11))
print((datetime.datetime.now() - datetime.datetime(2016, 5, 13, 11)).days)
print(datetime.datetime.now().strftime("%Y-%m-%d-%H"))
print(datetime.datetime.now()+datetime.timedelta(days=2))
import time
lst = []
for i in range(5):
lst.append(str(datetime.datetime.now()))
#time.sleep(1)
print(lst)
|
[
"print(\"Hello text\")\nprint(7)\ngreeting = \"Hello\"\nprint(greeting)\n\n# greeting2 = input(\"Write a greeting: \")\n# print(greeting2)\n\na = 2\nb = 3\n\nprint(a + b)\nprint(type(2))\n\n# age = input(\"Enter your age: \")\n# new_age = int(age) + 50\n# print(new_age)\n\nprint(3**2)\n\nnewgret = greeting.replace(\"e\", \"i\")\nnewgret = greeting.replace(\"e\", \"i\", 1)\nprint(newgret)\n\n# print(dir(newgret))\n\nprint(newgret[0])\nprint(newgret[-1])\nprint(newgret[0:3])\nprint(newgret[:4])\nprint(newgret[3:])\n\n# List mutable\nc = [\"H\", 2, \"Hello\"]\nprint(c)\nprint(type(c[1]))\nc.append(4)\nprint(c)\nc.remove(\"H\")\nprint(c)\n\n# Tuple not mutable\nt = (\"Hello\", 2 , 4.6)\nprint(t)\n\n# Dictionary unordered\nd = {\"Name\" : \"John\", \"Surname\" : \"Smith\", \"Cities\": (\"Porto\", \"San diego\", \"Bali\")}\nprint(d)\nprint(d[\"Name\"])\nprint(d[\"Cities\"][1])\n\ndef minutes_to_hours(seconds, minutes=70):\n hours = (minutes / 60.0) + (seconds / 3600)\n return hours\n\nprint(minutes_to_hours(300))\n\n# def age_foo(age):\n# new_age = float(age) + 50\n# return new_age\n\n# age = input(\"Enter your age: \")\n\n# if age < 150:\n# print(age_foo(age))\n# else:\n# print(\"How it is possible?\")\n\na = 5\n\nif a < 5:\n print(\"less than 5\")\nelif a == 5:\n print(\"equal to 5\")\nelse:\n print(\"equal or greater than 5\")\n\nemails = ['[email protected]', '[email protected]', '[email protected]']\nfor email in emails:\n if 'gmail' in email:\n print(email)\n\n# def currency_converter(rate, euros):\n# dollars = euros * rate\n# return dollars\n# r = input(\"Enter rate: \")\n# e = input(\"Enter euros: \")\n# print(currency_converter(r, e))\n\n# password = \"\"\n# while password != \"python123\":\n# password = input(\"Enter password: \")\n# if password == 'python123':\n# print(\"You are logged in!\")\n# else:\n# print(\"Sorry, try again!\")\n\nnames=['james', 'john', 'jack']\nemail_domains=['gmail', 'yahoo']\n\nfor i,j in zip(names, email_domains):\n print(i, j)\n\nfile = open(\"example.txt\", \"r\")\ncontent = file.read()\nprint(content)\nfile.seek(0)\ncontent2 = file.readlines()\nprint(content2)\n\ncontent2 = [i.rstrip(\"\\n\") for i in content2]\nprint(content2)\n\nfile = open(\"example.txt\", \"w\")\nfile.write(\"Line 1\\n\")\nfile.write(\"Line 2\\n\")\n\nfor i in range(10):\n file.write(\"Line \" + str(i) + \"\\n\")\n\nfile = open(\"example.txt\", \"a\")\nfile.write(\"Line 11\\n\")\nfile.close()\n\nwith open(\"example.txt\", \"a+\") as file:\n file.seek(0)\n content = file.read()\n print(content)\n\nimport os\nprint(os.listdir(\".\"))\nprint(os.__file__)\n\n\"\"\"\nThis script creates an empty file\n\"\"\"\ndef create_file():\n \"\"\"\n This function creates an empty file\n \"\"\"\n with open(\"example.txt\", \"w\") as file:\n file.write(\"\")\n\nimport datetime\nprint(datetime.datetime.now())\nprint(datetime.datetime(2016, 5, 13, 11))\nprint((datetime.datetime.now() - datetime.datetime(2016, 5, 13, 11)).days)\nprint(datetime.datetime.now().strftime(\"%Y-%m-%d-%H\"))\nprint(datetime.datetime.now()+datetime.timedelta(days=2))\n\nimport time\nlst = []\nfor i in range(5):\n lst.append(str(datetime.datetime.now()))\n #time.sleep(1)\nprint(lst)",
"print('Hello text')\nprint(7)\ngreeting = 'Hello'\nprint(greeting)\na = 2\nb = 3\nprint(a + b)\nprint(type(2))\nprint(3 ** 2)\nnewgret = greeting.replace('e', 'i')\nnewgret = greeting.replace('e', 'i', 1)\nprint(newgret)\nprint(newgret[0])\nprint(newgret[-1])\nprint(newgret[0:3])\nprint(newgret[:4])\nprint(newgret[3:])\nc = ['H', 2, 'Hello']\nprint(c)\nprint(type(c[1]))\nc.append(4)\nprint(c)\nc.remove('H')\nprint(c)\nt = 'Hello', 2, 4.6\nprint(t)\nd = {'Name': 'John', 'Surname': 'Smith', 'Cities': ('Porto', 'San diego',\n 'Bali')}\nprint(d)\nprint(d['Name'])\nprint(d['Cities'][1])\n\n\ndef minutes_to_hours(seconds, minutes=70):\n hours = minutes / 60.0 + seconds / 3600\n return hours\n\n\nprint(minutes_to_hours(300))\na = 5\nif a < 5:\n print('less than 5')\nelif a == 5:\n print('equal to 5')\nelse:\n print('equal or greater than 5')\nemails = ['[email protected]', '[email protected]', '[email protected]']\nfor email in emails:\n if 'gmail' in email:\n print(email)\nnames = ['james', 'john', 'jack']\nemail_domains = ['gmail', 'yahoo']\nfor i, j in zip(names, email_domains):\n print(i, j)\nfile = open('example.txt', 'r')\ncontent = file.read()\nprint(content)\nfile.seek(0)\ncontent2 = file.readlines()\nprint(content2)\ncontent2 = [i.rstrip('\\n') for i in content2]\nprint(content2)\nfile = open('example.txt', 'w')\nfile.write('Line 1\\n')\nfile.write('Line 2\\n')\nfor i in range(10):\n file.write('Line ' + str(i) + '\\n')\nfile = open('example.txt', 'a')\nfile.write('Line 11\\n')\nfile.close()\nwith open('example.txt', 'a+') as file:\n file.seek(0)\n content = file.read()\n print(content)\nimport os\nprint(os.listdir('.'))\nprint(os.__file__)\n<docstring token>\n\n\ndef create_file():\n \"\"\"\n This function creates an empty file\n \"\"\"\n with open('example.txt', 'w') as file:\n file.write('')\n\n\nimport datetime\nprint(datetime.datetime.now())\nprint(datetime.datetime(2016, 5, 13, 11))\nprint((datetime.datetime.now() - datetime.datetime(2016, 5, 13, 11)).days)\nprint(datetime.datetime.now().strftime('%Y-%m-%d-%H'))\nprint(datetime.datetime.now() + datetime.timedelta(days=2))\nimport time\nlst = []\nfor i in range(5):\n lst.append(str(datetime.datetime.now()))\nprint(lst)\n",
"print('Hello text')\nprint(7)\ngreeting = 'Hello'\nprint(greeting)\na = 2\nb = 3\nprint(a + b)\nprint(type(2))\nprint(3 ** 2)\nnewgret = greeting.replace('e', 'i')\nnewgret = greeting.replace('e', 'i', 1)\nprint(newgret)\nprint(newgret[0])\nprint(newgret[-1])\nprint(newgret[0:3])\nprint(newgret[:4])\nprint(newgret[3:])\nc = ['H', 2, 'Hello']\nprint(c)\nprint(type(c[1]))\nc.append(4)\nprint(c)\nc.remove('H')\nprint(c)\nt = 'Hello', 2, 4.6\nprint(t)\nd = {'Name': 'John', 'Surname': 'Smith', 'Cities': ('Porto', 'San diego',\n 'Bali')}\nprint(d)\nprint(d['Name'])\nprint(d['Cities'][1])\n\n\ndef minutes_to_hours(seconds, minutes=70):\n hours = minutes / 60.0 + seconds / 3600\n return hours\n\n\nprint(minutes_to_hours(300))\na = 5\nif a < 5:\n print('less than 5')\nelif a == 5:\n print('equal to 5')\nelse:\n print('equal or greater than 5')\nemails = ['[email protected]', '[email protected]', '[email protected]']\nfor email in emails:\n if 'gmail' in email:\n print(email)\nnames = ['james', 'john', 'jack']\nemail_domains = ['gmail', 'yahoo']\nfor i, j in zip(names, email_domains):\n print(i, j)\nfile = open('example.txt', 'r')\ncontent = file.read()\nprint(content)\nfile.seek(0)\ncontent2 = file.readlines()\nprint(content2)\ncontent2 = [i.rstrip('\\n') for i in content2]\nprint(content2)\nfile = open('example.txt', 'w')\nfile.write('Line 1\\n')\nfile.write('Line 2\\n')\nfor i in range(10):\n file.write('Line ' + str(i) + '\\n')\nfile = open('example.txt', 'a')\nfile.write('Line 11\\n')\nfile.close()\nwith open('example.txt', 'a+') as file:\n file.seek(0)\n content = file.read()\n print(content)\n<import token>\nprint(os.listdir('.'))\nprint(os.__file__)\n<docstring token>\n\n\ndef create_file():\n \"\"\"\n This function creates an empty file\n \"\"\"\n with open('example.txt', 'w') as file:\n file.write('')\n\n\n<import token>\nprint(datetime.datetime.now())\nprint(datetime.datetime(2016, 5, 13, 11))\nprint((datetime.datetime.now() - datetime.datetime(2016, 5, 13, 11)).days)\nprint(datetime.datetime.now().strftime('%Y-%m-%d-%H'))\nprint(datetime.datetime.now() + datetime.timedelta(days=2))\n<import token>\nlst = []\nfor i in range(5):\n lst.append(str(datetime.datetime.now()))\nprint(lst)\n",
"print('Hello text')\nprint(7)\n<assignment token>\nprint(greeting)\n<assignment token>\nprint(a + b)\nprint(type(2))\nprint(3 ** 2)\n<assignment token>\nprint(newgret)\nprint(newgret[0])\nprint(newgret[-1])\nprint(newgret[0:3])\nprint(newgret[:4])\nprint(newgret[3:])\n<assignment token>\nprint(c)\nprint(type(c[1]))\nc.append(4)\nprint(c)\nc.remove('H')\nprint(c)\n<assignment token>\nprint(t)\n<assignment token>\nprint(d)\nprint(d['Name'])\nprint(d['Cities'][1])\n\n\ndef minutes_to_hours(seconds, minutes=70):\n hours = minutes / 60.0 + seconds / 3600\n return hours\n\n\nprint(minutes_to_hours(300))\n<assignment token>\nif a < 5:\n print('less than 5')\nelif a == 5:\n print('equal to 5')\nelse:\n print('equal or greater than 5')\n<assignment token>\nfor email in emails:\n if 'gmail' in email:\n print(email)\n<assignment token>\nfor i, j in zip(names, email_domains):\n print(i, j)\n<assignment token>\nprint(content)\nfile.seek(0)\n<assignment token>\nprint(content2)\n<assignment token>\nprint(content2)\n<assignment token>\nfile.write('Line 1\\n')\nfile.write('Line 2\\n')\nfor i in range(10):\n file.write('Line ' + str(i) + '\\n')\n<assignment token>\nfile.write('Line 11\\n')\nfile.close()\nwith open('example.txt', 'a+') as file:\n file.seek(0)\n content = file.read()\n print(content)\n<import token>\nprint(os.listdir('.'))\nprint(os.__file__)\n<docstring token>\n\n\ndef create_file():\n \"\"\"\n This function creates an empty file\n \"\"\"\n with open('example.txt', 'w') as file:\n file.write('')\n\n\n<import token>\nprint(datetime.datetime.now())\nprint(datetime.datetime(2016, 5, 13, 11))\nprint((datetime.datetime.now() - datetime.datetime(2016, 5, 13, 11)).days)\nprint(datetime.datetime.now().strftime('%Y-%m-%d-%H'))\nprint(datetime.datetime.now() + datetime.timedelta(days=2))\n<import token>\n<assignment token>\nfor i in range(5):\n lst.append(str(datetime.datetime.now()))\nprint(lst)\n",
"<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef minutes_to_hours(seconds, minutes=70):\n hours = minutes / 60.0 + seconds / 3600\n return hours\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<import token>\n<code token>\n<docstring token>\n\n\ndef create_file():\n \"\"\"\n This function creates an empty file\n \"\"\"\n with open('example.txt', 'w') as file:\n file.write('')\n\n\n<import token>\n<code token>\n<import token>\n<assignment token>\n<code token>\n",
"<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<import token>\n<code token>\n<docstring token>\n\n\ndef create_file():\n \"\"\"\n This function creates an empty file\n \"\"\"\n with open('example.txt', 'w') as file:\n file.write('')\n\n\n<import token>\n<code token>\n<import token>\n<assignment token>\n<code token>\n",
"<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<import token>\n<code token>\n<docstring token>\n<function token>\n<import token>\n<code token>\n<import token>\n<assignment token>\n<code token>\n"
] | false |
98,619 |
0107a7798e0d78943955318aa1789f5367d583de
|
"""Generate pruned datasets and train models"""
import argparse
import os
import re
import shutil
import subprocess
import tarfile
import numpy as np
PREPROCESS = "preprocess"
TRAIN = "train"
EVALUATE = "evaluate"
def main():
"""Identify model state with lowest validation set loss"""
parser = argparse.ArgumentParser()
parser.add_argument("-data_dir", required=True, help="Directory containing original data set in requisite folder structure (small part or all data)")
parser.add_argument("-features_filename", required=True, help="Features cloudpickle file that provides that pruning information")
parser.add_argument("-start_seed", type=int, default=1284171779)
parser.add_argument("-num_datasets", type=int, default=20)
parser.add_argument("-modes", choices=[PREPROCESS, TRAIN, EVALUATE], nargs="+", required=True)
args = parser.parse_args()
return pipeline(args)
def pipeline(args):
"""Pipeline"""
cwd = os.getcwd()
run_dirs = [f"run_dir_{seed}" for seed in range(args.start_seed, args.start_seed + args.num_datasets)]
if PREPROCESS in args.modes:
condor_dir = f"{os.path.dirname(os.path.realpath(__file__))}/condor"
for idx, run_dir in enumerate(run_dirs):
# Make directory
if not os.path.exists(run_dir):
os.makedirs(run_dir)
pruned_dir = f"{run_dir}/pruned"
seed = args.start_seed + idx
# Prune data
cmd = (f"python -m mimic3ext.prune_data -data_dir {args.data_dir} -features_filename {args.features_filename}"
f" -output_dir {pruned_dir} -randomize_features -seed {seed}")
subprocess.check_call(cmd, shell=True)
# Compress pruned data
with tarfile.open(f"{run_dir}/pruned.tar.gz", "w:gz") as tar_fp:
os.chdir(pruned_dir)
for filename in os.listdir("."):
tar_fp.add(filename)
os.chdir(cwd)
# Delete raw data files
shutil.rmtree(pruned_dir)
# Copy other files
for filename in os.listdir(condor_dir):
shutil.copy(f"{condor_dir}/{filename}", run_dir)
if TRAIN in args.modes:
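        # Submit the HTCondor job described by mimic.sub in each run directory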
for run_dir in run_dirs:
os.chdir(run_dir)
cmd = "condor_submit mimic.sub"
subprocess.check_call(cmd, shell=True)
os.chdir(cwd)
if EVALUATE in args.modes:
rocs = np.zeros((2, args.num_datasets))
prcs = np.zeros((2, args.num_datasets))
for idx, run_dir in enumerate(run_dirs):
logs = [f"{run_dir}/train_test.log", f"{run_dir}/test_best.log"]
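            # Scan each log from the end so the most recently reported AUROC/AUPRC values are kept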
for lidx, log in enumerate(logs):
with open(log, "r") as log_file:
for line in reversed(list(log_file)):
match = re.search(r"AUC of (PRC|ROC) = (0\.\d+)", line)
if match:
dtype, value = match.groups()
value = float(value)
if dtype == "PRC":
prcs[lidx, idx] = value
elif dtype == "ROC":
rocs[lidx, idx] = value
break # Found both AUPRC and AUROC
np.savez("aucs.npz", rocs=rocs, prcs=prcs)
if __name__ == "__main__":
main()
|
[
"\"\"\"Generate pruned datasets and train models\"\"\"\nimport argparse\nimport os\nimport re\nimport shutil\nimport subprocess\nimport tarfile\n\nimport numpy as np\n\n\nPREPROCESS = \"preprocess\"\nTRAIN = \"train\"\nEVALUATE = \"evaluate\"\n\n\ndef main():\n \"\"\"Identify model state with lowest validation set loss\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-data_dir\", required=True, help=\"Directory containing original data set in requisite folder structure (small part or all data)\")\n parser.add_argument(\"-features_filename\", required=True, help=\"Features cloudpickle file that provides that pruning information\")\n parser.add_argument(\"-start_seed\", type=int, default=1284171779)\n parser.add_argument(\"-num_datasets\", type=int, default=20)\n parser.add_argument(\"-modes\", choices=[PREPROCESS, TRAIN, EVALUATE], nargs=\"+\", required=True)\n args = parser.parse_args()\n return pipeline(args)\n\n\ndef pipeline(args):\n \"\"\"Pipeline\"\"\"\n cwd = os.getcwd()\n run_dirs = [f\"run_dir_{seed}\" for seed in range(args.start_seed, args.start_seed + args.num_datasets)]\n if PREPROCESS in args.modes:\n condor_dir = f\"{os.path.dirname(os.path.realpath(__file__))}/condor\"\n for idx, run_dir in enumerate(run_dirs):\n # Make directory\n if not os.path.exists(run_dir):\n os.makedirs(run_dir)\n pruned_dir = f\"{run_dir}/pruned\"\n seed = args.start_seed + idx\n # Prune data\n cmd = (f\"python -m mimic3ext.prune_data -data_dir {args.data_dir} -features_filename {args.features_filename}\"\n f\" -output_dir {pruned_dir} -randomize_features -seed {seed}\")\n subprocess.check_call(cmd, shell=True)\n # Compress pruned data\n with tarfile.open(f\"{run_dir}/pruned.tar.gz\", \"w:gz\") as tar_fp:\n os.chdir(pruned_dir)\n for filename in os.listdir(\".\"):\n tar_fp.add(filename)\n os.chdir(cwd)\n # Delete raw data files\n shutil.rmtree(pruned_dir)\n # Copy other files\n for filename in os.listdir(condor_dir):\n shutil.copy(f\"{condor_dir}/{filename}\", run_dir)\n if TRAIN in args.modes:\n for run_dir in run_dirs:\n os.chdir(run_dir)\n cmd = \"condor_submit mimic.sub\"\n subprocess.check_call(cmd, shell=True)\n os.chdir(cwd)\n if EVALUATE in args.modes:\n rocs = np.zeros((2, args.num_datasets))\n prcs = np.zeros((2, args.num_datasets))\n for idx, run_dir in enumerate(run_dirs):\n logs = [f\"{run_dir}/train_test.log\", f\"{run_dir}/test_best.log\"]\n for lidx, log in enumerate(logs):\n with open(log, \"r\") as log_file:\n for line in reversed(list(log_file)):\n match = re.search(r\"AUC of (PRC|ROC) = (0\\.\\d+)\", line)\n if match:\n dtype, value = match.groups()\n value = float(value)\n if dtype == \"PRC\":\n prcs[lidx, idx] = value\n elif dtype == \"ROC\":\n rocs[lidx, idx] = value\n break # Found both AUPRC and AUROC\n np.savez(\"aucs.npz\", rocs=rocs, prcs=prcs)\n\n\nif __name__ == \"__main__\":\n main()\n",
"<docstring token>\nimport argparse\nimport os\nimport re\nimport shutil\nimport subprocess\nimport tarfile\nimport numpy as np\nPREPROCESS = 'preprocess'\nTRAIN = 'train'\nEVALUATE = 'evaluate'\n\n\ndef main():\n \"\"\"Identify model state with lowest validation set loss\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('-data_dir', required=True, help=\n 'Directory containing original data set in requisite folder structure (small part or all data)'\n )\n parser.add_argument('-features_filename', required=True, help=\n 'Features cloudpickle file that provides that pruning information')\n parser.add_argument('-start_seed', type=int, default=1284171779)\n parser.add_argument('-num_datasets', type=int, default=20)\n parser.add_argument('-modes', choices=[PREPROCESS, TRAIN, EVALUATE],\n nargs='+', required=True)\n args = parser.parse_args()\n return pipeline(args)\n\n\ndef pipeline(args):\n \"\"\"Pipeline\"\"\"\n cwd = os.getcwd()\n run_dirs = [f'run_dir_{seed}' for seed in range(args.start_seed, args.\n start_seed + args.num_datasets)]\n if PREPROCESS in args.modes:\n condor_dir = f'{os.path.dirname(os.path.realpath(__file__))}/condor'\n for idx, run_dir in enumerate(run_dirs):\n if not os.path.exists(run_dir):\n os.makedirs(run_dir)\n pruned_dir = f'{run_dir}/pruned'\n seed = args.start_seed + idx\n cmd = (\n f'python -m mimic3ext.prune_data -data_dir {args.data_dir} -features_filename {args.features_filename} -output_dir {pruned_dir} -randomize_features -seed {seed}'\n )\n subprocess.check_call(cmd, shell=True)\n with tarfile.open(f'{run_dir}/pruned.tar.gz', 'w:gz') as tar_fp:\n os.chdir(pruned_dir)\n for filename in os.listdir('.'):\n tar_fp.add(filename)\n os.chdir(cwd)\n shutil.rmtree(pruned_dir)\n for filename in os.listdir(condor_dir):\n shutil.copy(f'{condor_dir}/{filename}', run_dir)\n if TRAIN in args.modes:\n for run_dir in run_dirs:\n os.chdir(run_dir)\n cmd = 'condor_submit mimic.sub'\n subprocess.check_call(cmd, shell=True)\n os.chdir(cwd)\n if EVALUATE in args.modes:\n rocs = np.zeros((2, args.num_datasets))\n prcs = np.zeros((2, args.num_datasets))\n for idx, run_dir in enumerate(run_dirs):\n logs = [f'{run_dir}/train_test.log', f'{run_dir}/test_best.log']\n for lidx, log in enumerate(logs):\n with open(log, 'r') as log_file:\n for line in reversed(list(log_file)):\n match = re.search('AUC of (PRC|ROC) = (0\\\\.\\\\d+)', line\n )\n if match:\n dtype, value = match.groups()\n value = float(value)\n if dtype == 'PRC':\n prcs[lidx, idx] = value\n elif dtype == 'ROC':\n rocs[lidx, idx] = value\n break\n np.savez('aucs.npz', rocs=rocs, prcs=prcs)\n\n\nif __name__ == '__main__':\n main()\n",
"<docstring token>\n<import token>\nPREPROCESS = 'preprocess'\nTRAIN = 'train'\nEVALUATE = 'evaluate'\n\n\ndef main():\n \"\"\"Identify model state with lowest validation set loss\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('-data_dir', required=True, help=\n 'Directory containing original data set in requisite folder structure (small part or all data)'\n )\n parser.add_argument('-features_filename', required=True, help=\n 'Features cloudpickle file that provides that pruning information')\n parser.add_argument('-start_seed', type=int, default=1284171779)\n parser.add_argument('-num_datasets', type=int, default=20)\n parser.add_argument('-modes', choices=[PREPROCESS, TRAIN, EVALUATE],\n nargs='+', required=True)\n args = parser.parse_args()\n return pipeline(args)\n\n\ndef pipeline(args):\n \"\"\"Pipeline\"\"\"\n cwd = os.getcwd()\n run_dirs = [f'run_dir_{seed}' for seed in range(args.start_seed, args.\n start_seed + args.num_datasets)]\n if PREPROCESS in args.modes:\n condor_dir = f'{os.path.dirname(os.path.realpath(__file__))}/condor'\n for idx, run_dir in enumerate(run_dirs):\n if not os.path.exists(run_dir):\n os.makedirs(run_dir)\n pruned_dir = f'{run_dir}/pruned'\n seed = args.start_seed + idx\n cmd = (\n f'python -m mimic3ext.prune_data -data_dir {args.data_dir} -features_filename {args.features_filename} -output_dir {pruned_dir} -randomize_features -seed {seed}'\n )\n subprocess.check_call(cmd, shell=True)\n with tarfile.open(f'{run_dir}/pruned.tar.gz', 'w:gz') as tar_fp:\n os.chdir(pruned_dir)\n for filename in os.listdir('.'):\n tar_fp.add(filename)\n os.chdir(cwd)\n shutil.rmtree(pruned_dir)\n for filename in os.listdir(condor_dir):\n shutil.copy(f'{condor_dir}/{filename}', run_dir)\n if TRAIN in args.modes:\n for run_dir in run_dirs:\n os.chdir(run_dir)\n cmd = 'condor_submit mimic.sub'\n subprocess.check_call(cmd, shell=True)\n os.chdir(cwd)\n if EVALUATE in args.modes:\n rocs = np.zeros((2, args.num_datasets))\n prcs = np.zeros((2, args.num_datasets))\n for idx, run_dir in enumerate(run_dirs):\n logs = [f'{run_dir}/train_test.log', f'{run_dir}/test_best.log']\n for lidx, log in enumerate(logs):\n with open(log, 'r') as log_file:\n for line in reversed(list(log_file)):\n match = re.search('AUC of (PRC|ROC) = (0\\\\.\\\\d+)', line\n )\n if match:\n dtype, value = match.groups()\n value = float(value)\n if dtype == 'PRC':\n prcs[lidx, idx] = value\n elif dtype == 'ROC':\n rocs[lidx, idx] = value\n break\n np.savez('aucs.npz', rocs=rocs, prcs=prcs)\n\n\nif __name__ == '__main__':\n main()\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef main():\n \"\"\"Identify model state with lowest validation set loss\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('-data_dir', required=True, help=\n 'Directory containing original data set in requisite folder structure (small part or all data)'\n )\n parser.add_argument('-features_filename', required=True, help=\n 'Features cloudpickle file that provides that pruning information')\n parser.add_argument('-start_seed', type=int, default=1284171779)\n parser.add_argument('-num_datasets', type=int, default=20)\n parser.add_argument('-modes', choices=[PREPROCESS, TRAIN, EVALUATE],\n nargs='+', required=True)\n args = parser.parse_args()\n return pipeline(args)\n\n\ndef pipeline(args):\n \"\"\"Pipeline\"\"\"\n cwd = os.getcwd()\n run_dirs = [f'run_dir_{seed}' for seed in range(args.start_seed, args.\n start_seed + args.num_datasets)]\n if PREPROCESS in args.modes:\n condor_dir = f'{os.path.dirname(os.path.realpath(__file__))}/condor'\n for idx, run_dir in enumerate(run_dirs):\n if not os.path.exists(run_dir):\n os.makedirs(run_dir)\n pruned_dir = f'{run_dir}/pruned'\n seed = args.start_seed + idx\n cmd = (\n f'python -m mimic3ext.prune_data -data_dir {args.data_dir} -features_filename {args.features_filename} -output_dir {pruned_dir} -randomize_features -seed {seed}'\n )\n subprocess.check_call(cmd, shell=True)\n with tarfile.open(f'{run_dir}/pruned.tar.gz', 'w:gz') as tar_fp:\n os.chdir(pruned_dir)\n for filename in os.listdir('.'):\n tar_fp.add(filename)\n os.chdir(cwd)\n shutil.rmtree(pruned_dir)\n for filename in os.listdir(condor_dir):\n shutil.copy(f'{condor_dir}/{filename}', run_dir)\n if TRAIN in args.modes:\n for run_dir in run_dirs:\n os.chdir(run_dir)\n cmd = 'condor_submit mimic.sub'\n subprocess.check_call(cmd, shell=True)\n os.chdir(cwd)\n if EVALUATE in args.modes:\n rocs = np.zeros((2, args.num_datasets))\n prcs = np.zeros((2, args.num_datasets))\n for idx, run_dir in enumerate(run_dirs):\n logs = [f'{run_dir}/train_test.log', f'{run_dir}/test_best.log']\n for lidx, log in enumerate(logs):\n with open(log, 'r') as log_file:\n for line in reversed(list(log_file)):\n match = re.search('AUC of (PRC|ROC) = (0\\\\.\\\\d+)', line\n )\n if match:\n dtype, value = match.groups()\n value = float(value)\n if dtype == 'PRC':\n prcs[lidx, idx] = value\n elif dtype == 'ROC':\n rocs[lidx, idx] = value\n break\n np.savez('aucs.npz', rocs=rocs, prcs=prcs)\n\n\nif __name__ == '__main__':\n main()\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef main():\n \"\"\"Identify model state with lowest validation set loss\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('-data_dir', required=True, help=\n 'Directory containing original data set in requisite folder structure (small part or all data)'\n )\n parser.add_argument('-features_filename', required=True, help=\n 'Features cloudpickle file that provides that pruning information')\n parser.add_argument('-start_seed', type=int, default=1284171779)\n parser.add_argument('-num_datasets', type=int, default=20)\n parser.add_argument('-modes', choices=[PREPROCESS, TRAIN, EVALUATE],\n nargs='+', required=True)\n args = parser.parse_args()\n return pipeline(args)\n\n\ndef pipeline(args):\n \"\"\"Pipeline\"\"\"\n cwd = os.getcwd()\n run_dirs = [f'run_dir_{seed}' for seed in range(args.start_seed, args.\n start_seed + args.num_datasets)]\n if PREPROCESS in args.modes:\n condor_dir = f'{os.path.dirname(os.path.realpath(__file__))}/condor'\n for idx, run_dir in enumerate(run_dirs):\n if not os.path.exists(run_dir):\n os.makedirs(run_dir)\n pruned_dir = f'{run_dir}/pruned'\n seed = args.start_seed + idx\n cmd = (\n f'python -m mimic3ext.prune_data -data_dir {args.data_dir} -features_filename {args.features_filename} -output_dir {pruned_dir} -randomize_features -seed {seed}'\n )\n subprocess.check_call(cmd, shell=True)\n with tarfile.open(f'{run_dir}/pruned.tar.gz', 'w:gz') as tar_fp:\n os.chdir(pruned_dir)\n for filename in os.listdir('.'):\n tar_fp.add(filename)\n os.chdir(cwd)\n shutil.rmtree(pruned_dir)\n for filename in os.listdir(condor_dir):\n shutil.copy(f'{condor_dir}/{filename}', run_dir)\n if TRAIN in args.modes:\n for run_dir in run_dirs:\n os.chdir(run_dir)\n cmd = 'condor_submit mimic.sub'\n subprocess.check_call(cmd, shell=True)\n os.chdir(cwd)\n if EVALUATE in args.modes:\n rocs = np.zeros((2, args.num_datasets))\n prcs = np.zeros((2, args.num_datasets))\n for idx, run_dir in enumerate(run_dirs):\n logs = [f'{run_dir}/train_test.log', f'{run_dir}/test_best.log']\n for lidx, log in enumerate(logs):\n with open(log, 'r') as log_file:\n for line in reversed(list(log_file)):\n match = re.search('AUC of (PRC|ROC) = (0\\\\.\\\\d+)', line\n )\n if match:\n dtype, value = match.groups()\n value = float(value)\n if dtype == 'PRC':\n prcs[lidx, idx] = value\n elif dtype == 'ROC':\n rocs[lidx, idx] = value\n break\n np.savez('aucs.npz', rocs=rocs, prcs=prcs)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n\n\ndef pipeline(args):\n \"\"\"Pipeline\"\"\"\n cwd = os.getcwd()\n run_dirs = [f'run_dir_{seed}' for seed in range(args.start_seed, args.\n start_seed + args.num_datasets)]\n if PREPROCESS in args.modes:\n condor_dir = f'{os.path.dirname(os.path.realpath(__file__))}/condor'\n for idx, run_dir in enumerate(run_dirs):\n if not os.path.exists(run_dir):\n os.makedirs(run_dir)\n pruned_dir = f'{run_dir}/pruned'\n seed = args.start_seed + idx\n cmd = (\n f'python -m mimic3ext.prune_data -data_dir {args.data_dir} -features_filename {args.features_filename} -output_dir {pruned_dir} -randomize_features -seed {seed}'\n )\n subprocess.check_call(cmd, shell=True)\n with tarfile.open(f'{run_dir}/pruned.tar.gz', 'w:gz') as tar_fp:\n os.chdir(pruned_dir)\n for filename in os.listdir('.'):\n tar_fp.add(filename)\n os.chdir(cwd)\n shutil.rmtree(pruned_dir)\n for filename in os.listdir(condor_dir):\n shutil.copy(f'{condor_dir}/{filename}', run_dir)\n if TRAIN in args.modes:\n for run_dir in run_dirs:\n os.chdir(run_dir)\n cmd = 'condor_submit mimic.sub'\n subprocess.check_call(cmd, shell=True)\n os.chdir(cwd)\n if EVALUATE in args.modes:\n rocs = np.zeros((2, args.num_datasets))\n prcs = np.zeros((2, args.num_datasets))\n for idx, run_dir in enumerate(run_dirs):\n logs = [f'{run_dir}/train_test.log', f'{run_dir}/test_best.log']\n for lidx, log in enumerate(logs):\n with open(log, 'r') as log_file:\n for line in reversed(list(log_file)):\n match = re.search('AUC of (PRC|ROC) = (0\\\\.\\\\d+)', line\n )\n if match:\n dtype, value = match.groups()\n value = float(value)\n if dtype == 'PRC':\n prcs[lidx, idx] = value\n elif dtype == 'ROC':\n rocs[lidx, idx] = value\n break\n np.savez('aucs.npz', rocs=rocs, prcs=prcs)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<code token>\n"
] | false |
98,620 |
9f0e83c136f2f801402d839c9da4a88ed347d664
|
import gym
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
from collections import deque
import random
from Envrioment import *
import numpy as np
#define deep neural net class
class DQN:
def __init__(self, env):
self.env = env
        #define replay memory
self.memory = deque(maxlen=2000)
        #future reward discount factor
self.gamma = 0.95
        #exploration/exploitation initial value, decay and minimum
self.epsilon = 1.0
self.epsilon_decay = 0.995
self.epsilon_min = 0.1
        #learning rate
self.learing_rate = 0.01
#target model smoothing rate
self.tau = .125
#define model
self.model = self.create_model()
#define target model
self.target_model = self.create_model()
def create_model(self):
        #define sequential model
model = Sequential()
#get state shape
#add input layer
model.add(Dense(48, input_shape=(81,),
activation='relu'))
#add layers
model.add(Dense(48, activation="relu"))
model.add(Dense(48, activation="relu"))
#add output layer
model.add(Dense(19))
        #compile model with Adam optimiser and mean squared error loss
model.compile(loss='mean_squared_error',
optimizer=Adam(lr=self.learing_rate))
#return model
return model
#tell model to remember state
def remember(self, state, action, reward, newstate, done):
self.memory.append([state, action, reward, newstate, done])
    #replay and train on a sample of experiences committed to memory
def replay(self):
batch_size = 32
#if not enough info return
if len(self.memory) < batch_size:
return
        #get random sample of memories of batch size
samples = random.sample(self.memory, batch_size)
        #iterate over samples
for sample in samples:
state, action, reward, newstate, done = sample
            #predict Q-values for the current state
target = self.target_model.predict(state)
            #just apply the immediate reward if this was a terminal state
if done:
target[0][action] = reward
            #else use the Q function to weight both current and future reward
            #calculate the future reward based upon the strength of the model's predictions
else:
                #get the best predicted future reward for the next state
Q_future = max(self.target_model.predict(newstate)[0])
                #add the immediate reward for the action plus the discounted future reward
target[0][action] = reward + Q_future * self.gamma
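                #worked example (hypothetical numbers): with reward = 1.0, a best
                #predicted future Q of 2.0 and gamma = 0.95, the target for the
                #chosen action becomes 1.0 + 0.95 * 2.0 = 2.9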
            #fit model to the updated reward targets
print('fitting model')
self.model.fit(state, target, epochs=1, verbose=0)
#train target
def train_target(self):
weights = self.model.get_weights()
target_weights = self.target_model.get_weights()
for i in range(len(target_weights)):
            #slowly blend the target weights toward the primary model weights by tau each update
            #ie with tau = 0.125 each new target weight keeps 87.5% of the old value and takes 12.5% from the primary model
target_weights[i] = weights[i] * self.tau + target_weights[i] * (1 - self.tau)
self.target_model.set_weights(target_weights)
#act model
def act(self, state):
self.epsilon *= self.epsilon_decay
self.epsilon = max(self.epsilon_min, self.epsilon)
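        #note: with epsilon_decay = 0.995 and epsilon_min = 0.1, epsilon reaches its
        #floor after roughly ln(0.1)/ln(0.995), i.e. about 460 calls to act()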
if np.random.random() < self.epsilon:
#return random action space value
return random.randint(0, 18)
        #else pick the action with the maximum predicted value
return np.argmax(self.model.predict(state))
#save model
def save_model(self, fn):
self.model.save(fn)
#define main function
def main():
env = Match(200)
trials = 1000
trial_len = 500
dqn_agent = DQN(env=env)
steps = []
    #run through trials
for trial in range(trials):
cur_state = env.reset().reshape(1, 81)
for step in range(trial_len):
#get action
action = dqn_agent.act(cur_state)
#step env
newstate, reward, done = env.step(0, action)
print('reward: {}'.format(reward))
#reshape newstate
newstate = newstate.reshape(1, 81)
#reward = reward if not done else -20
            #remember the transition data
dqn_agent.remember(cur_state, action, reward, newstate, done)
            #replay states from memory to train
dqn_agent.replay()
#train target
dqn_agent.train_target()
            #set current state to new state
cur_state = newstate
#break if done
if done:
break
            #if the agent can't complete the trial in a certain number of steps then fail it
if step >= 199:
print("Failed to complete in trial {}".format(trial))
                #save every tenth model
if step % 10 == 0:
dqn_agent.save_model("trial-{}.model".format(trial))
else:
print("Completed in {} trials".format(trial))
#save winning model
dqn_agent.save_model("success.model")
break
main()
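#a minimal inference sketch, assuming a saved "success.model" file exists and that
#Match exposes the same reset()/step() API used above:
#    from tensorflow.keras.models import load_model
#    model = load_model("success.model")
#    state = Match(200).reset().reshape(1, 81)
#    action = np.argmax(model.predict(state))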
|
[
"import gym\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.optimizers import Adam\nfrom collections import deque\nimport random\nfrom Envrioment import *\nimport numpy as np\n#deifne deep neural net calls\nclass DQN:\n def __init__(self, env):\n self.env = env\n #define memeonry\n self.memory = deque(maxlen=2000)\n #future rewards depertiaon\n self.gamma = 0.95\n #expoloration/explotation intial value, decay and minium\n self.epsilon = 1.0\n self.epsilon_decay = 0.995\n self.epsilon_min = 0.1\n #learing rate\n self.learing_rate = 0.01\n #target model smoothing rate\n self.tau = .125\n #define model\n self.model = self.create_model()\n #define target model\n self.target_model = self.create_model()\n\n\n \n def create_model(self):\n #define sequintail model\n model = Sequential()\n #get state shape\n #add input layer\n model.add(Dense(48, input_shape=(81,), \n activation='relu'))\n #add layers\n model.add(Dense(48, activation=\"relu\"))\n model.add(Dense(48, activation=\"relu\"))\n #add output layer\n model.add(Dense(19))\n #comple model with adam opismiser and meen square rror lost\n model.compile(loss='mean_squared_error', \n optimizer=Adam(lr=self.learing_rate))\n #return model\n return model\n\n #tell model to remember state\n def remember(self, state, action, reward, newstate, done):\n self.memory.append([state, action, reward, newstate, done])\n\n #replay and train on a sample of memery data commtied to memory\n def replay(self):\n batch_size = 32\n #if not enough info return\n if len(self.memory) < batch_size:\n return\n #get random sample of states with bath size\n samples = random.sample(self.memory, batch_size) \n #itterate over samples\n for sample in samples:\n state, action, reward, newstate, done = sample\n #predict action\n target = self.target_model.predict(state)\n #jsut aplly pervious reward if state was finshing state\n if done:\n target[0][action] = reward\n #else use q function to weight both current reward and futre reward\n #cacluate futre reward baseupon strengh of model predictions\n else:\n #get futere aciton cofinede for reward\n Q_future = max(self.target_model.predict(newstate)[0])\n #add the reward for the action pluse futre\n target[0][action] = reward + Q_future * self.gamma\n #fit model to reward offets function returns \n print('fitting model')\n self.model.fit(state, target, epochs=1, verbose=0)\n #train target\n def train_target(self):\n weights = self.model.get_weights()\n target_weights = self.target_model.get_weights()\n for i in range(len(target_weights)):\n #slowy convert target weights over time to the priamy model weights by tau each time. ie it will be 75 og weight. 
and 25 new weight and the next tik mroeso\n target_weights[i] = weights[i] * self.tau + target_weights[i] * (1 - self.tau)\n self.target_model.set_weights(target_weights)\n\n #act model\n def act(self, state):\n self.epsilon *= self.epsilon_decay\n self.epsilon = max(self.epsilon_min, self.epsilon)\n if np.random.random() < self.epsilon:\n #return random action space value\n return random.randint(0, 18)\n #else pic max vlaue\n return np.argmax(self.model.predict(state))\n\n #save model\n def save_model(self, fn):\n self.model.save(fn)\n\n#define main function\ndef main():\n env = Match(200)\n\n trials = 1000\n\n trial_len = 500\n\n dqn_agent = DQN(env=env)\n steps = []\n #runn through trials\n for trial in range(trials):\n cur_state = env.reset().reshape(1, 81)\n for step in range(trial_len):\n #get action\n action = dqn_agent.act(cur_state)\n #step env\n newstate, reward, done = env.step(0, action)\n print('reward: {}'.format(reward))\n #reshape newstate\n newstate = newstate.reshape(1, 81)\n #reward = reward if not done else -20\n #remember date\n dqn_agent.remember(cur_state, action, reward, newstate, done)\n #replay sates from memeory to train\n dqn_agent.replay()\n #train target\n dqn_agent.train_target()\n\n #set curretns tate to new state\n cur_state = newstate\n #break if done\n if done:\n break\n #if cant compleate tiral in centan number of steps then fail teh robot \n if step >= 199:\n print(\"Failed to complete in trial {}\".format(trial))\n #save very tent model\n if step % 10 == 0:\n dqn_agent.save_model(\"trial-{}.model\".format(trial))\n else:\n print(\"Completed in {} trials\".format(trial))\n #save winning model\n dqn_agent.save_model(\"success.model\")\n break\nmain()\n",
"import gym\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.optimizers import Adam\nfrom collections import deque\nimport random\nfrom Envrioment import *\nimport numpy as np\n\n\nclass DQN:\n\n def __init__(self, env):\n self.env = env\n self.memory = deque(maxlen=2000)\n self.gamma = 0.95\n self.epsilon = 1.0\n self.epsilon_decay = 0.995\n self.epsilon_min = 0.1\n self.learing_rate = 0.01\n self.tau = 0.125\n self.model = self.create_model()\n self.target_model = self.create_model()\n\n def create_model(self):\n model = Sequential()\n model.add(Dense(48, input_shape=(81,), activation='relu'))\n model.add(Dense(48, activation='relu'))\n model.add(Dense(48, activation='relu'))\n model.add(Dense(19))\n model.compile(loss='mean_squared_error', optimizer=Adam(lr=self.\n learing_rate))\n return model\n\n def remember(self, state, action, reward, newstate, done):\n self.memory.append([state, action, reward, newstate, done])\n\n def replay(self):\n batch_size = 32\n if len(self.memory) < batch_size:\n return\n samples = random.sample(self.memory, batch_size)\n for sample in samples:\n state, action, reward, newstate, done = sample\n target = self.target_model.predict(state)\n if done:\n target[0][action] = reward\n else:\n Q_future = max(self.target_model.predict(newstate)[0])\n target[0][action] = reward + Q_future * self.gamma\n print('fitting model')\n self.model.fit(state, target, epochs=1, verbose=0)\n\n def train_target(self):\n weights = self.model.get_weights()\n target_weights = self.target_model.get_weights()\n for i in range(len(target_weights)):\n target_weights[i] = weights[i] * self.tau + target_weights[i] * (\n 1 - self.tau)\n self.target_model.set_weights(target_weights)\n\n def act(self, state):\n self.epsilon *= self.epsilon_decay\n self.epsilon = max(self.epsilon_min, self.epsilon)\n if np.random.random() < self.epsilon:\n return random.randint(0, 18)\n return np.argmax(self.model.predict(state))\n\n def save_model(self, fn):\n self.model.save(fn)\n\n\ndef main():\n env = Match(200)\n trials = 1000\n trial_len = 500\n dqn_agent = DQN(env=env)\n steps = []\n for trial in range(trials):\n cur_state = env.reset().reshape(1, 81)\n for step in range(trial_len):\n action = dqn_agent.act(cur_state)\n newstate, reward, done = env.step(0, action)\n print('reward: {}'.format(reward))\n newstate = newstate.reshape(1, 81)\n dqn_agent.remember(cur_state, action, reward, newstate, done)\n dqn_agent.replay()\n dqn_agent.train_target()\n cur_state = newstate\n if done:\n break\n if step >= 199:\n print('Failed to complete in trial {}'.format(trial))\n if step % 10 == 0:\n dqn_agent.save_model('trial-{}.model'.format(trial))\n else:\n print('Completed in {} trials'.format(trial))\n dqn_agent.save_model('success.model')\n break\n\n\nmain()\n",
"<import token>\n\n\nclass DQN:\n\n def __init__(self, env):\n self.env = env\n self.memory = deque(maxlen=2000)\n self.gamma = 0.95\n self.epsilon = 1.0\n self.epsilon_decay = 0.995\n self.epsilon_min = 0.1\n self.learing_rate = 0.01\n self.tau = 0.125\n self.model = self.create_model()\n self.target_model = self.create_model()\n\n def create_model(self):\n model = Sequential()\n model.add(Dense(48, input_shape=(81,), activation='relu'))\n model.add(Dense(48, activation='relu'))\n model.add(Dense(48, activation='relu'))\n model.add(Dense(19))\n model.compile(loss='mean_squared_error', optimizer=Adam(lr=self.\n learing_rate))\n return model\n\n def remember(self, state, action, reward, newstate, done):\n self.memory.append([state, action, reward, newstate, done])\n\n def replay(self):\n batch_size = 32\n if len(self.memory) < batch_size:\n return\n samples = random.sample(self.memory, batch_size)\n for sample in samples:\n state, action, reward, newstate, done = sample\n target = self.target_model.predict(state)\n if done:\n target[0][action] = reward\n else:\n Q_future = max(self.target_model.predict(newstate)[0])\n target[0][action] = reward + Q_future * self.gamma\n print('fitting model')\n self.model.fit(state, target, epochs=1, verbose=0)\n\n def train_target(self):\n weights = self.model.get_weights()\n target_weights = self.target_model.get_weights()\n for i in range(len(target_weights)):\n target_weights[i] = weights[i] * self.tau + target_weights[i] * (\n 1 - self.tau)\n self.target_model.set_weights(target_weights)\n\n def act(self, state):\n self.epsilon *= self.epsilon_decay\n self.epsilon = max(self.epsilon_min, self.epsilon)\n if np.random.random() < self.epsilon:\n return random.randint(0, 18)\n return np.argmax(self.model.predict(state))\n\n def save_model(self, fn):\n self.model.save(fn)\n\n\ndef main():\n env = Match(200)\n trials = 1000\n trial_len = 500\n dqn_agent = DQN(env=env)\n steps = []\n for trial in range(trials):\n cur_state = env.reset().reshape(1, 81)\n for step in range(trial_len):\n action = dqn_agent.act(cur_state)\n newstate, reward, done = env.step(0, action)\n print('reward: {}'.format(reward))\n newstate = newstate.reshape(1, 81)\n dqn_agent.remember(cur_state, action, reward, newstate, done)\n dqn_agent.replay()\n dqn_agent.train_target()\n cur_state = newstate\n if done:\n break\n if step >= 199:\n print('Failed to complete in trial {}'.format(trial))\n if step % 10 == 0:\n dqn_agent.save_model('trial-{}.model'.format(trial))\n else:\n print('Completed in {} trials'.format(trial))\n dqn_agent.save_model('success.model')\n break\n\n\nmain()\n",
"<import token>\n\n\nclass DQN:\n\n def __init__(self, env):\n self.env = env\n self.memory = deque(maxlen=2000)\n self.gamma = 0.95\n self.epsilon = 1.0\n self.epsilon_decay = 0.995\n self.epsilon_min = 0.1\n self.learing_rate = 0.01\n self.tau = 0.125\n self.model = self.create_model()\n self.target_model = self.create_model()\n\n def create_model(self):\n model = Sequential()\n model.add(Dense(48, input_shape=(81,), activation='relu'))\n model.add(Dense(48, activation='relu'))\n model.add(Dense(48, activation='relu'))\n model.add(Dense(19))\n model.compile(loss='mean_squared_error', optimizer=Adam(lr=self.\n learing_rate))\n return model\n\n def remember(self, state, action, reward, newstate, done):\n self.memory.append([state, action, reward, newstate, done])\n\n def replay(self):\n batch_size = 32\n if len(self.memory) < batch_size:\n return\n samples = random.sample(self.memory, batch_size)\n for sample in samples:\n state, action, reward, newstate, done = sample\n target = self.target_model.predict(state)\n if done:\n target[0][action] = reward\n else:\n Q_future = max(self.target_model.predict(newstate)[0])\n target[0][action] = reward + Q_future * self.gamma\n print('fitting model')\n self.model.fit(state, target, epochs=1, verbose=0)\n\n def train_target(self):\n weights = self.model.get_weights()\n target_weights = self.target_model.get_weights()\n for i in range(len(target_weights)):\n target_weights[i] = weights[i] * self.tau + target_weights[i] * (\n 1 - self.tau)\n self.target_model.set_weights(target_weights)\n\n def act(self, state):\n self.epsilon *= self.epsilon_decay\n self.epsilon = max(self.epsilon_min, self.epsilon)\n if np.random.random() < self.epsilon:\n return random.randint(0, 18)\n return np.argmax(self.model.predict(state))\n\n def save_model(self, fn):\n self.model.save(fn)\n\n\ndef main():\n env = Match(200)\n trials = 1000\n trial_len = 500\n dqn_agent = DQN(env=env)\n steps = []\n for trial in range(trials):\n cur_state = env.reset().reshape(1, 81)\n for step in range(trial_len):\n action = dqn_agent.act(cur_state)\n newstate, reward, done = env.step(0, action)\n print('reward: {}'.format(reward))\n newstate = newstate.reshape(1, 81)\n dqn_agent.remember(cur_state, action, reward, newstate, done)\n dqn_agent.replay()\n dqn_agent.train_target()\n cur_state = newstate\n if done:\n break\n if step >= 199:\n print('Failed to complete in trial {}'.format(trial))\n if step % 10 == 0:\n dqn_agent.save_model('trial-{}.model'.format(trial))\n else:\n print('Completed in {} trials'.format(trial))\n dqn_agent.save_model('success.model')\n break\n\n\n<code token>\n",
"<import token>\n\n\nclass DQN:\n\n def __init__(self, env):\n self.env = env\n self.memory = deque(maxlen=2000)\n self.gamma = 0.95\n self.epsilon = 1.0\n self.epsilon_decay = 0.995\n self.epsilon_min = 0.1\n self.learing_rate = 0.01\n self.tau = 0.125\n self.model = self.create_model()\n self.target_model = self.create_model()\n\n def create_model(self):\n model = Sequential()\n model.add(Dense(48, input_shape=(81,), activation='relu'))\n model.add(Dense(48, activation='relu'))\n model.add(Dense(48, activation='relu'))\n model.add(Dense(19))\n model.compile(loss='mean_squared_error', optimizer=Adam(lr=self.\n learing_rate))\n return model\n\n def remember(self, state, action, reward, newstate, done):\n self.memory.append([state, action, reward, newstate, done])\n\n def replay(self):\n batch_size = 32\n if len(self.memory) < batch_size:\n return\n samples = random.sample(self.memory, batch_size)\n for sample in samples:\n state, action, reward, newstate, done = sample\n target = self.target_model.predict(state)\n if done:\n target[0][action] = reward\n else:\n Q_future = max(self.target_model.predict(newstate)[0])\n target[0][action] = reward + Q_future * self.gamma\n print('fitting model')\n self.model.fit(state, target, epochs=1, verbose=0)\n\n def train_target(self):\n weights = self.model.get_weights()\n target_weights = self.target_model.get_weights()\n for i in range(len(target_weights)):\n target_weights[i] = weights[i] * self.tau + target_weights[i] * (\n 1 - self.tau)\n self.target_model.set_weights(target_weights)\n\n def act(self, state):\n self.epsilon *= self.epsilon_decay\n self.epsilon = max(self.epsilon_min, self.epsilon)\n if np.random.random() < self.epsilon:\n return random.randint(0, 18)\n return np.argmax(self.model.predict(state))\n\n def save_model(self, fn):\n self.model.save(fn)\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass DQN:\n\n def __init__(self, env):\n self.env = env\n self.memory = deque(maxlen=2000)\n self.gamma = 0.95\n self.epsilon = 1.0\n self.epsilon_decay = 0.995\n self.epsilon_min = 0.1\n self.learing_rate = 0.01\n self.tau = 0.125\n self.model = self.create_model()\n self.target_model = self.create_model()\n\n def create_model(self):\n model = Sequential()\n model.add(Dense(48, input_shape=(81,), activation='relu'))\n model.add(Dense(48, activation='relu'))\n model.add(Dense(48, activation='relu'))\n model.add(Dense(19))\n model.compile(loss='mean_squared_error', optimizer=Adam(lr=self.\n learing_rate))\n return model\n\n def remember(self, state, action, reward, newstate, done):\n self.memory.append([state, action, reward, newstate, done])\n\n def replay(self):\n batch_size = 32\n if len(self.memory) < batch_size:\n return\n samples = random.sample(self.memory, batch_size)\n for sample in samples:\n state, action, reward, newstate, done = sample\n target = self.target_model.predict(state)\n if done:\n target[0][action] = reward\n else:\n Q_future = max(self.target_model.predict(newstate)[0])\n target[0][action] = reward + Q_future * self.gamma\n print('fitting model')\n self.model.fit(state, target, epochs=1, verbose=0)\n\n def train_target(self):\n weights = self.model.get_weights()\n target_weights = self.target_model.get_weights()\n for i in range(len(target_weights)):\n target_weights[i] = weights[i] * self.tau + target_weights[i] * (\n 1 - self.tau)\n self.target_model.set_weights(target_weights)\n\n def act(self, state):\n self.epsilon *= self.epsilon_decay\n self.epsilon = max(self.epsilon_min, self.epsilon)\n if np.random.random() < self.epsilon:\n return random.randint(0, 18)\n return np.argmax(self.model.predict(state))\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass DQN:\n\n def __init__(self, env):\n self.env = env\n self.memory = deque(maxlen=2000)\n self.gamma = 0.95\n self.epsilon = 1.0\n self.epsilon_decay = 0.995\n self.epsilon_min = 0.1\n self.learing_rate = 0.01\n self.tau = 0.125\n self.model = self.create_model()\n self.target_model = self.create_model()\n\n def create_model(self):\n model = Sequential()\n model.add(Dense(48, input_shape=(81,), activation='relu'))\n model.add(Dense(48, activation='relu'))\n model.add(Dense(48, activation='relu'))\n model.add(Dense(19))\n model.compile(loss='mean_squared_error', optimizer=Adam(lr=self.\n learing_rate))\n return model\n <function token>\n\n def replay(self):\n batch_size = 32\n if len(self.memory) < batch_size:\n return\n samples = random.sample(self.memory, batch_size)\n for sample in samples:\n state, action, reward, newstate, done = sample\n target = self.target_model.predict(state)\n if done:\n target[0][action] = reward\n else:\n Q_future = max(self.target_model.predict(newstate)[0])\n target[0][action] = reward + Q_future * self.gamma\n print('fitting model')\n self.model.fit(state, target, epochs=1, verbose=0)\n\n def train_target(self):\n weights = self.model.get_weights()\n target_weights = self.target_model.get_weights()\n for i in range(len(target_weights)):\n target_weights[i] = weights[i] * self.tau + target_weights[i] * (\n 1 - self.tau)\n self.target_model.set_weights(target_weights)\n\n def act(self, state):\n self.epsilon *= self.epsilon_decay\n self.epsilon = max(self.epsilon_min, self.epsilon)\n if np.random.random() < self.epsilon:\n return random.randint(0, 18)\n return np.argmax(self.model.predict(state))\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass DQN:\n\n def __init__(self, env):\n self.env = env\n self.memory = deque(maxlen=2000)\n self.gamma = 0.95\n self.epsilon = 1.0\n self.epsilon_decay = 0.995\n self.epsilon_min = 0.1\n self.learing_rate = 0.01\n self.tau = 0.125\n self.model = self.create_model()\n self.target_model = self.create_model()\n <function token>\n <function token>\n\n def replay(self):\n batch_size = 32\n if len(self.memory) < batch_size:\n return\n samples = random.sample(self.memory, batch_size)\n for sample in samples:\n state, action, reward, newstate, done = sample\n target = self.target_model.predict(state)\n if done:\n target[0][action] = reward\n else:\n Q_future = max(self.target_model.predict(newstate)[0])\n target[0][action] = reward + Q_future * self.gamma\n print('fitting model')\n self.model.fit(state, target, epochs=1, verbose=0)\n\n def train_target(self):\n weights = self.model.get_weights()\n target_weights = self.target_model.get_weights()\n for i in range(len(target_weights)):\n target_weights[i] = weights[i] * self.tau + target_weights[i] * (\n 1 - self.tau)\n self.target_model.set_weights(target_weights)\n\n def act(self, state):\n self.epsilon *= self.epsilon_decay\n self.epsilon = max(self.epsilon_min, self.epsilon)\n if np.random.random() < self.epsilon:\n return random.randint(0, 18)\n return np.argmax(self.model.predict(state))\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass DQN:\n\n def __init__(self, env):\n self.env = env\n self.memory = deque(maxlen=2000)\n self.gamma = 0.95\n self.epsilon = 1.0\n self.epsilon_decay = 0.995\n self.epsilon_min = 0.1\n self.learing_rate = 0.01\n self.tau = 0.125\n self.model = self.create_model()\n self.target_model = self.create_model()\n <function token>\n <function token>\n\n def replay(self):\n batch_size = 32\n if len(self.memory) < batch_size:\n return\n samples = random.sample(self.memory, batch_size)\n for sample in samples:\n state, action, reward, newstate, done = sample\n target = self.target_model.predict(state)\n if done:\n target[0][action] = reward\n else:\n Q_future = max(self.target_model.predict(newstate)[0])\n target[0][action] = reward + Q_future * self.gamma\n print('fitting model')\n self.model.fit(state, target, epochs=1, verbose=0)\n <function token>\n\n def act(self, state):\n self.epsilon *= self.epsilon_decay\n self.epsilon = max(self.epsilon_min, self.epsilon)\n if np.random.random() < self.epsilon:\n return random.randint(0, 18)\n return np.argmax(self.model.predict(state))\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass DQN:\n <function token>\n <function token>\n <function token>\n\n def replay(self):\n batch_size = 32\n if len(self.memory) < batch_size:\n return\n samples = random.sample(self.memory, batch_size)\n for sample in samples:\n state, action, reward, newstate, done = sample\n target = self.target_model.predict(state)\n if done:\n target[0][action] = reward\n else:\n Q_future = max(self.target_model.predict(newstate)[0])\n target[0][action] = reward + Q_future * self.gamma\n print('fitting model')\n self.model.fit(state, target, epochs=1, verbose=0)\n <function token>\n\n def act(self, state):\n self.epsilon *= self.epsilon_decay\n self.epsilon = max(self.epsilon_min, self.epsilon)\n if np.random.random() < self.epsilon:\n return random.randint(0, 18)\n return np.argmax(self.model.predict(state))\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass DQN:\n <function token>\n <function token>\n <function token>\n\n def replay(self):\n batch_size = 32\n if len(self.memory) < batch_size:\n return\n samples = random.sample(self.memory, batch_size)\n for sample in samples:\n state, action, reward, newstate, done = sample\n target = self.target_model.predict(state)\n if done:\n target[0][action] = reward\n else:\n Q_future = max(self.target_model.predict(newstate)[0])\n target[0][action] = reward + Q_future * self.gamma\n print('fitting model')\n self.model.fit(state, target, epochs=1, verbose=0)\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass DQN:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n<class token>\n<function token>\n<code token>\n"
] | false |
98,621 |
2a992cb5cf0e52f56106278eee894c2437db5411
|
def ticker():
infile = open('tickernames.txt', 'r')
regels = infile.readlines()
infile.close()
tickerdict = {}
for regel in regels:
tickerregel = regel.split(':')
sleutel = tickerregel[0]
waarde = tickerregel[1].strip()
tickerdict[sleutel] = waarde
return tickerdict
print(ticker())
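# example (hypothetical file contents): a line such as "AAPL:Apple Inc." in
# tickernames.txt would produce the dictionary entry {'AAPL': 'Apple Inc.'}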
|
[
"def ticker():\n infile = open('tickernames.txt', 'r')\n regels = infile.readlines()\n infile.close()\n tickerdict = {}\n for regel in regels:\n tickerregel = regel.split(':')\n sleutel = tickerregel[0]\n waarde = tickerregel[1].strip()\n tickerdict[sleutel] = waarde\n return tickerdict\n\nprint(ticker())",
"def ticker():\n infile = open('tickernames.txt', 'r')\n regels = infile.readlines()\n infile.close()\n tickerdict = {}\n for regel in regels:\n tickerregel = regel.split(':')\n sleutel = tickerregel[0]\n waarde = tickerregel[1].strip()\n tickerdict[sleutel] = waarde\n return tickerdict\n\n\nprint(ticker())\n",
"def ticker():\n infile = open('tickernames.txt', 'r')\n regels = infile.readlines()\n infile.close()\n tickerdict = {}\n for regel in regels:\n tickerregel = regel.split(':')\n sleutel = tickerregel[0]\n waarde = tickerregel[1].strip()\n tickerdict[sleutel] = waarde\n return tickerdict\n\n\n<code token>\n",
"<function token>\n<code token>\n"
] | false |
98,622 |
8a995c2596926cca8665728f107887df7340eb95
|
import json
from django.shortcuts import render
from django.http import JsonResponse, HttpResponse
# Create your views here.
def index(request):
return render(request, 'index.html')
value_list = ['apple', 'pear', 'banana']
def load_login(request):
return render(request, 'login.html')
def search_key(request):
method = request.method
if method == 'POST':
body = json.loads(request.body)
if "key" not in body:
return JsonResponse([], safe=False)
key = body['key']
ret = []
for i in value_list:
if key in i:
ret.append(i)
return JsonResponse(ret, safe=False)
else:
return HttpResponse(status=404)
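# example (hypothetical request): POSTing the JSON body {"key": "an"} to this view
# returns ["banana"]; any non-POST request falls through to the 404 response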
fruites = ['apple', 'pear', 'banana', 'orange']
def get_fruits(request):
return JsonResponse(fruites, safe=False)
def login(request):
users = [
{'user': 'user1', 'psw': 'user1'},
{'user': 'user2', 'psw': 'user2'}
]
method = request.method
if method == 'POST':
body = json.loads(request.body)
if "name" not in body or "psw" not in body:
return JsonResponse({'success': False}, safe=False)
        # check every user before failing, so valid credentials later in the
        # list are not rejected on the first mismatch
        for user in users:
            if user['user'] == body['name'] and user['psw'] == body['psw']:
                return JsonResponse({'success': True}, safe=False)
        return JsonResponse({'success': False}, safe=False)
else:
return HttpResponse(status=404)
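# example (hypothetical request): POSTing {"name": "user1", "psw": "user1"} returns
# {"success": true}; credentials not in the users list return {"success": false}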
def style_demo(request):
return render(request,'style_demo.html')
def component_info(request):
return render(request,'component_info.html')
|
[
"import json\n\nfrom django.shortcuts import render\nfrom django.http import JsonResponse, HttpResponse\n\n\n# Create your views here.\n\ndef index(request):\n return render(request, 'index.html')\n\n\nvalue_list = ['apple', 'pear', 'banana']\n\n\ndef load_login(request):\n return render(request, 'login.html')\n\n\ndef search_key(request):\n method = request.method\n if method == 'POST':\n body = json.loads(request.body)\n if \"key\" not in body:\n return JsonResponse([], safe=False)\n\n key = body['key']\n ret = []\n\n for i in value_list:\n if key in i:\n ret.append(i)\n\n return JsonResponse(ret, safe=False)\n\n else:\n return HttpResponse(status=404)\n\n\nfruites = ['apple', 'pear', 'banana', 'orange']\n\n\ndef get_fruits(request):\n return JsonResponse(fruites, safe=False)\n\n\ndef login(request):\n users = [\n {'user': 'user1', 'psw': 'user1'},\n {'user': 'user2', 'psw': 'user2'}\n ]\n\n method = request.method\n\n if method == 'POST':\n body = json.loads(request.body)\n\n if \"name\" not in body or \"psw\" not in body:\n return JsonResponse({'success': False}, safe=False)\n\n for user in users:\n if user['user'] == body['name'] and user['psw'] == body['psw']:\n return JsonResponse({'success': True}, safe=False)\n else:\n return JsonResponse({'success': False}, safe=False)\n\n else:\n return HttpResponse(status=404)\n\n\ndef style_demo(request):\n return render(request,'style_demo.html')\n\n\ndef component_info(request):\n return render(request,'component_info.html')",
"import json\nfrom django.shortcuts import render\nfrom django.http import JsonResponse, HttpResponse\n\n\ndef index(request):\n return render(request, 'index.html')\n\n\nvalue_list = ['apple', 'pear', 'banana']\n\n\ndef load_login(request):\n return render(request, 'login.html')\n\n\ndef search_key(request):\n method = request.method\n if method == 'POST':\n body = json.loads(request.body)\n if 'key' not in body:\n return JsonResponse([], safe=False)\n key = body['key']\n ret = []\n for i in value_list:\n if key in i:\n ret.append(i)\n return JsonResponse(ret, safe=False)\n else:\n return HttpResponse(status=404)\n\n\nfruites = ['apple', 'pear', 'banana', 'orange']\n\n\ndef get_fruits(request):\n return JsonResponse(fruites, safe=False)\n\n\ndef login(request):\n users = [{'user': 'user1', 'psw': 'user1'}, {'user': 'user2', 'psw':\n 'user2'}]\n method = request.method\n if method == 'POST':\n body = json.loads(request.body)\n if 'name' not in body or 'psw' not in body:\n return JsonResponse({'success': False}, safe=False)\n for user in users:\n if user['user'] == body['name'] and user['psw'] == body['psw']:\n return JsonResponse({'success': True}, safe=False)\n else:\n return JsonResponse({'success': False}, safe=False)\n else:\n return HttpResponse(status=404)\n\n\ndef style_demo(request):\n return render(request, 'style_demo.html')\n\n\ndef component_info(request):\n return render(request, 'component_info.html')\n",
"<import token>\n\n\ndef index(request):\n return render(request, 'index.html')\n\n\nvalue_list = ['apple', 'pear', 'banana']\n\n\ndef load_login(request):\n return render(request, 'login.html')\n\n\ndef search_key(request):\n method = request.method\n if method == 'POST':\n body = json.loads(request.body)\n if 'key' not in body:\n return JsonResponse([], safe=False)\n key = body['key']\n ret = []\n for i in value_list:\n if key in i:\n ret.append(i)\n return JsonResponse(ret, safe=False)\n else:\n return HttpResponse(status=404)\n\n\nfruites = ['apple', 'pear', 'banana', 'orange']\n\n\ndef get_fruits(request):\n return JsonResponse(fruites, safe=False)\n\n\ndef login(request):\n users = [{'user': 'user1', 'psw': 'user1'}, {'user': 'user2', 'psw':\n 'user2'}]\n method = request.method\n if method == 'POST':\n body = json.loads(request.body)\n if 'name' not in body or 'psw' not in body:\n return JsonResponse({'success': False}, safe=False)\n for user in users:\n if user['user'] == body['name'] and user['psw'] == body['psw']:\n return JsonResponse({'success': True}, safe=False)\n else:\n return JsonResponse({'success': False}, safe=False)\n else:\n return HttpResponse(status=404)\n\n\ndef style_demo(request):\n return render(request, 'style_demo.html')\n\n\ndef component_info(request):\n return render(request, 'component_info.html')\n",
"<import token>\n\n\ndef index(request):\n return render(request, 'index.html')\n\n\n<assignment token>\n\n\ndef load_login(request):\n return render(request, 'login.html')\n\n\ndef search_key(request):\n method = request.method\n if method == 'POST':\n body = json.loads(request.body)\n if 'key' not in body:\n return JsonResponse([], safe=False)\n key = body['key']\n ret = []\n for i in value_list:\n if key in i:\n ret.append(i)\n return JsonResponse(ret, safe=False)\n else:\n return HttpResponse(status=404)\n\n\n<assignment token>\n\n\ndef get_fruits(request):\n return JsonResponse(fruites, safe=False)\n\n\ndef login(request):\n users = [{'user': 'user1', 'psw': 'user1'}, {'user': 'user2', 'psw':\n 'user2'}]\n method = request.method\n if method == 'POST':\n body = json.loads(request.body)\n if 'name' not in body or 'psw' not in body:\n return JsonResponse({'success': False}, safe=False)\n for user in users:\n if user['user'] == body['name'] and user['psw'] == body['psw']:\n return JsonResponse({'success': True}, safe=False)\n else:\n return JsonResponse({'success': False}, safe=False)\n else:\n return HttpResponse(status=404)\n\n\ndef style_demo(request):\n return render(request, 'style_demo.html')\n\n\ndef component_info(request):\n return render(request, 'component_info.html')\n",
"<import token>\n\n\ndef index(request):\n return render(request, 'index.html')\n\n\n<assignment token>\n<function token>\n\n\ndef search_key(request):\n method = request.method\n if method == 'POST':\n body = json.loads(request.body)\n if 'key' not in body:\n return JsonResponse([], safe=False)\n key = body['key']\n ret = []\n for i in value_list:\n if key in i:\n ret.append(i)\n return JsonResponse(ret, safe=False)\n else:\n return HttpResponse(status=404)\n\n\n<assignment token>\n\n\ndef get_fruits(request):\n return JsonResponse(fruites, safe=False)\n\n\ndef login(request):\n users = [{'user': 'user1', 'psw': 'user1'}, {'user': 'user2', 'psw':\n 'user2'}]\n method = request.method\n if method == 'POST':\n body = json.loads(request.body)\n if 'name' not in body or 'psw' not in body:\n return JsonResponse({'success': False}, safe=False)\n for user in users:\n if user['user'] == body['name'] and user['psw'] == body['psw']:\n return JsonResponse({'success': True}, safe=False)\n else:\n return JsonResponse({'success': False}, safe=False)\n else:\n return HttpResponse(status=404)\n\n\ndef style_demo(request):\n return render(request, 'style_demo.html')\n\n\ndef component_info(request):\n return render(request, 'component_info.html')\n",
"<import token>\n\n\ndef index(request):\n return render(request, 'index.html')\n\n\n<assignment token>\n<function token>\n\n\ndef search_key(request):\n method = request.method\n if method == 'POST':\n body = json.loads(request.body)\n if 'key' not in body:\n return JsonResponse([], safe=False)\n key = body['key']\n ret = []\n for i in value_list:\n if key in i:\n ret.append(i)\n return JsonResponse(ret, safe=False)\n else:\n return HttpResponse(status=404)\n\n\n<assignment token>\n\n\ndef get_fruits(request):\n return JsonResponse(fruites, safe=False)\n\n\n<function token>\n\n\ndef style_demo(request):\n return render(request, 'style_demo.html')\n\n\ndef component_info(request):\n return render(request, 'component_info.html')\n",
"<import token>\n\n\ndef index(request):\n return render(request, 'index.html')\n\n\n<assignment token>\n<function token>\n<function token>\n<assignment token>\n\n\ndef get_fruits(request):\n return JsonResponse(fruites, safe=False)\n\n\n<function token>\n\n\ndef style_demo(request):\n return render(request, 'style_demo.html')\n\n\ndef component_info(request):\n return render(request, 'component_info.html')\n",
"<import token>\n\n\ndef index(request):\n return render(request, 'index.html')\n\n\n<assignment token>\n<function token>\n<function token>\n<assignment token>\n\n\ndef get_fruits(request):\n return JsonResponse(fruites, safe=False)\n\n\n<function token>\n\n\ndef style_demo(request):\n return render(request, 'style_demo.html')\n\n\n<function token>\n",
"<import token>\n\n\ndef index(request):\n return render(request, 'index.html')\n\n\n<assignment token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef style_demo(request):\n return render(request, 'style_demo.html')\n\n\n<function token>\n",
"<import token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef style_demo(request):\n return render(request, 'style_demo.html')\n\n\n<function token>\n",
"<import token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
98,623 |
ee8456f34385061376c345926a02d6095581ec3b
|
# ______________________________________________________________________________________________________________________
# this code is **************************************(U) UNCLASSIFIED***************************************************
# ______________________________________________________________________________________________________________________
# coding=utf-8
# ----------------------------------------------------------------------------------------------------------------------
# program name: parse_tweet_data
# major version: 1.1
# program purpose: This program converts the updated ~10M tweet dataset and uses various functions to parse,
# inventory, and prep the data for topic modelling and metadata analysis.
# python version: 3.6
#
# Author: Emily Parrish
# major version created:20200602
# last modification: 20200602 Created all major functions for inventorying and parsing
# 20200612 Adjusted directory structure for outputs
# ----------------------------------------------------------------------------------------------------------------------
import os
import sys
import string
import csv
import operator
import pandas as pd
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
import collections
import re
from helpers import *
# global paths
path = r'E:\Twitter\Russia\Russia_1906'
path_split = path.split('\\')
# current date
today = '20' + datetime.now().strftime('%m%d')
def convert_source(infile):
''' Imports a CSV file into Python Pandas and outputs a Pandas Data Frame Object. It then saves this object to a
pkl file for more efficient import of other processes.
Inputs: *.csv file
Outputs: *.pkl file in the input directory
'''
# construct absolute path
filepath = os.path.join(path, infile)
# import *.csv file to data frame
df = pd.read_csv(filepath)
outfile = path_split[1] + '_' + path_split[3] + '_' + today + '.pkl'
# save data frame to *.pkl file of same name
ext_path = os.path.join(path, '1_DataFrames')
df.to_pickle(os.path.join(ext_path, outfile))
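# example (hypothetical filename): convert_source('tweets_export.csv') would read
# E:\Twitter\Russia\Russia_1906\tweets_export.csv and write
# Twitter_Russia_1906_<today>.pkl into the 1_DataFrames subdirectory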
def convert_comb(files):
'''Takes a list of *.csv file inputs and creates a single data frames from each of them.
Inputs: Input *.csv files to be combined into a single data frame
Outputs: A data frame for each input and a single output combined data frame
'''
dfs = list()
for file in files:
ext_path = os.path.join(path, '1_DataFrames')
df = pd.read_pickle(os.path.join(ext_path, file))
dfs.append(df)
new_df = merge_df(dfs)
outfile = path_split[1] + '_' + path_split[3] + '_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
new_df.to_pickle(os.path.join(ext_path, outfile))
return new_df
def sort_df(df, field='tweet_time'):
''' Takes a Pandas data frame of Twitter data and sorts by a specified field to prep for data frame parsing steps. It
also adds a column called "unique_id_ida" with formatted ID numbers for each tweet.
Inputs: Pandas data frame imported from *.csv or *.pkl file
Field in data frame to be sorted (OPTIONAL: tweet_time, aka sort by date, is the default)
Outputs: *.pkl file in the input directory with "sorted" label sorted, containing additional column "unique_id_ida"
with 7-digit ID numbers beginning with 0000000 (i.e. 1234567, 0000134)
'''
# turn any time fields into datetime objects
if field == 'tweet_time':
df['tweet_time'] = pd.to_datetime(df.tweet_time)
# sort data frame by field column
df = df.sort_values(by=field)
# generate list of unique ID numbers with 7-digits (leading zeros for smaller numbers)
in_list = list()
for i in range(0, len(df.index)):
i = str(i)
while len(i) != 7:
i = "0" + i
in_list.append(i)
# add column ID numbers to data frame
df['unique_id_ida'] = np.array(in_list)
outfile = path_split[1] + '_' + path_split[3] + '_sorted_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
df.to_pickle(os.path.join(ext_path, outfile))
return df
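# example: position 42 in the sorted order gets unique_id_ida '0000042' (IDs are
# zero-padded to seven digits), and the sorted frame is saved as
# Twitter_Russia_1906_sorted_<today>.pkl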
def split_df(df, num=30):
'''Takes an input data frame and creates inventories for that data frame, sorted by date. First automatically calls
the sort function to sort the data frame by the default (date).
Inputs: Pandas data frame imported from *.csv or *.pkl file
                Number of inventories to split into. User's discretion depending on the size of the data set.
Outputs: Directory of inventories containing Tweet content and metadata. These inventories are divided in such a
way to keep them between 130 and 160 MB, labelled with alphabetical characters to order them. They are
sorted by date with the ranges of dates in each inventory in the file name
(i.e. AA_Twitter10M_090509_130214.csv)
'''
df = sort_df(df)
alphabets = string.ascii_lowercase
a_list = list()
for i in alphabets:
for j in alphabets:
a_list.append(i.upper() + j.upper())
    # splits data set into num data frames of equal size, each of which will represent an individual inventory.
df_split = np.array_split(df, num)
subpath = os.path.join(path, '2_Inventories')
alpha_index = 0
last_i = len(df_split) - 1
for item in df_split:
df_sub = pd.concat([item.head(1), item.tail(1)])
date_bounds = pd.Series(df_sub['tweet_time'].tolist())
date_bounds_format = (date_bounds.dt.strftime('%Y%m%d')).tolist()
to_file = item[item.tweet_time.dt.strftime('%Y%m%d') != date_bounds[1].strftime('%Y%m%d')]
if alpha_index == 0:
comb_df = to_file
elif alpha_index == last_i:
comb_df = pd.concat([extra_rows, item], axis=0)
else:
comb_df = pd.concat([extra_rows, to_file], axis=0)
prevdate = str(int(date_bounds_format[1]) - 1)
filename = a_list[alpha_index] + '_' + path_split[1] + '_' + path_split[3] + '_' + date_bounds_format[0][2:] + '_' + prevdate[2:] + '.csv'
print(filename)
filepath = os.path.join(subpath,filename)
comb_df.to_csv(filepath)
extra_rows = item[item.tweet_time.dt.strftime('%Y%m%d') == date_bounds[1].strftime('%Y%m%d')]
alpha_index += 1
def get_lang(df, lang):
'''Takes an input data frame and generates a data frame with only a specific language's tweets (user specified).
Inputs: Pandas data frame imported from *.csv or *.pkl file
Language code for language of interest
Outputs: Pandas data frame with a subset of tweets from that specific language
'''
lang_df = df.loc[df['tweet_language'] == lang]
outfile = path_split[1] + '_' + path_split[3] + '_sorted_' + lang + '_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
lang_df.to_pickle(os.path.join(ext_path, outfile))
return lang_df
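# example: get_lang(df, 'en') keeps only rows whose tweet_language is 'en' and
# saves them as Twitter_Russia_1906_sorted_en_<today>.pkl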
def strip_formatting(df, lim, lang='allLang'):
    '''Takes an input data frame and removes emojis, punctuation, HTML entities like &, links, and handles.
Then based on a user specified character limit, it removes the tweets that are below that limit and returns the sub-
data frame.
Inputs: Pandas data frame imported from *.csv or *.pkl file
Character limit for parsing after strip functionality implemented
Language label user provides for file naming (if it is a data frame describing a particular language)
    Outputs: Pandas data frame with subset of tweets that satisfied the character limit after removing entities
of interest
'''
tweets = df['tweet_text'].to_list()
edit_tweets = list()
include = list()
for tweet in tweets:
strip_tweet = strip_accounts(remove_punctuation(strip_html_entities(strip_links(strip_emoji(tweet)))))
edit_tweets.append(strip_tweet)
if is_length(strip_tweet, lim):
include.append('1')
else:
include.append('0')
df['stripped_tweet'] = edit_tweets
df['tweet_length'] = df['tweet_text'].str.len()
df['include_topic_model'] = include
    df['stripped_tweet_length'] = df['stripped_tweet'].str.len()  # length of the stripped text, not the include flag
sub_df = df.loc[df['include_topic_model'] == '1']
outfile = path_split[1] + '_' + path_split[3] + '_sorted_strip_' + lang + '_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
sub_df.to_pickle(os.path.join(ext_path, outfile))
return sub_df
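# example (mirroring the commented-out calls in main below): strip_formatting(en_df, 10, 'en')
# strips links/handles/emoji/punctuation, keeps only tweets whose stripped text passes the
# is_length(text, 10) check from helpers, and saves the surviving rows as
# Twitter_Russia_1906_sorted_strip_en_<today>.pkl (the file main() loads)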
def extract_content(df, label='All_Languages'):
'''Takes an input data frame and extracts the individual Tweets and places it in chronological directories
incremented by intervals based on a month.
Inputs: Pandas data frame imported from *.csv or *.pkl file
Language label user provides for file naming (if it is a data frame describing a particular language)
Outputs: Directories of binned tweets by month. Each tweet is in its own text file with the stripped tweet content
                only in the file. Each file is named accordingly like the following example:
                (i.e. 0012345_Twitter_Russia_1906_190601_1530.txt)
    '''
date_bounds = pd.Series(df['tweet_time'].tolist())
date_bounds_ymd = (date_bounds.dt.strftime('%Y%m%d')).tolist()
date_bounds_hms = (date_bounds.dt.strftime('%H%M')).tolist()
content = pd.Series(df['stripped_tweet'].tolist())
unid = pd.Series(df['unique_id_ida'].tolist())
print('Total Files to process: ' + str(len(date_bounds_ymd)))
parentdir = os.path.join(path, label)
os.mkdir(parentdir)
for i in range(0, len(date_bounds_ymd)):
dir = date_bounds_ymd[i][:4] + '-' + date_bounds_ymd[i][4:6]
fulldir = os.path.join(parentdir, dir)
filename = str(unid[i]) + '_' + path_split[1] + '_' + path_split[3] + '_' + date_bounds_ymd[i][2:] + '_' + date_bounds_hms[i][:4] + '.txt'
outpath = os.path.join(fulldir, filename)
if os.path.exists(outpath):
pass
else:
if os.path.isdir(fulldir):
pass
else:
os.mkdir(fulldir)
if int(i) % 10000 == 0:
print('Files up to ' + str(i) + ' processed.')
f = open(outpath, 'w', encoding='utf-8')
f.write(content[i])
f.close()
def generate_freq(df):
'''Takes an input data frame and generates a histogram of number of tweets binned by month.
Inputs: Pandas data frame imported from *.csv or *.pkl file
    Outputs: Histogram of tweet counts binned by month, plus a tweet_freq_*.csv frequency table
'''
date_bounds = pd.Series(df['tweet_time'].tolist())
date_bounds_ym = (date_bounds.dt.strftime('%Y-%m')).tolist()
df['date_md'] = np.array(date_bounds_ym)
sort = df.sort_values(by=['date_md'])
frq = sort['date_md'].value_counts().to_dict()
frq_df = sort['date_md'].value_counts()
od = collections.OrderedDict(sorted(frq.items()))
rf_dates = list()
for item in list(od.keys()):
date_rf = date_reformat(item)
rf_dates.append(date_rf)
data = {"Date": rf_dates, "Freq": list(od.values())}
    graph_frame = pd.DataFrame(data=data)
frq_df.to_csv(os.path.join(path, 'tweet_freq_' + today + '.csv'))
ax = graph_frame.plot.bar(x="Date", y="Freq", rot=45)
plt.show()
def main():
print('Start time: ' + str(datetime.now()))
infile = 'Twitter_Russia_1906_sorted_strip_en_200929.pkl'
inpath = os.path.join(path, '1_DataFrames')
infilepath = os.path.join(inpath, infile)
stripped_en = pd.read_pickle(infilepath)
print(stripped_en.head()['unique_id_ida'])
# sorted = sort_df(df)
# split_df(sorted, 1)
#
# en_df = get_lang(sorted, 'en')
# stripped_en = strip_formatting(en_df, 10, 'en')
#
# ru_df = get_lang(sorted, 'ru')
# stripped_ru = strip_formatting(ru_df, 12, 'ru')
#
# zh_df = get_lang(sorted, 'zh')
# stripped_zh = strip_formatting(zh_df, 2, 'zh')
extract_content(stripped_en, 'English')
print('End time: ' + str(datetime.now()))
if __name__ == '__main__':
main()
# this code is **************************************(U) UNCLASSIFIED***************************************************
|
[
"# ______________________________________________________________________________________________________________________\n# this code is **************************************(U) UNCLASSIFIED***************************************************\n# ______________________________________________________________________________________________________________________\n# coding=utf-8\n# ----------------------------------------------------------------------------------------------------------------------\n# program name: parse_tweet_data\n# major version: 1.1\n# program purpose: This program converts the updated ~10M tweet dataset and uses various functions to parse,\n# inventory, and prep the data for topic modelling and metadata analysis.\n# python version: 3.6\n#\n# Author: Emily Parrish\n# major version created:20200602\n# last modification: 20200602 Created all major functions for inventoring and parsing\n# 20200612 Adjusted directory structure for outputs\n\n# ----------------------------------------------------------------------------------------------------------------------\n\nimport os\nimport sys\nimport string\nimport csv\nimport operator\nimport pandas as pd\nfrom datetime import datetime\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport collections\nimport re\nfrom helpers import *\n\n# global paths\npath = r'E:\\Twitter\\Russia\\Russia_1906'\npath_split = path.split('\\\\')\n# current date\ntoday = '20' + datetime.now().strftime('%m%d')\n\ndef convert_source(infile):\n ''' Imports a CSV file into Python Pandas and outputs a Pandas Data Frame Object. It then saves this object to a\n pkl file for more efficient import of other processes.\n\n Inputs: *.csv file\n Outputs: *.pkl file in the input directory\n '''\n\n # construct absolute path\n filepath = os.path.join(path, infile)\n\n # import *.csv file to data frame\n df = pd.read_csv(filepath)\n\n outfile = path_split[1] + '_' + path_split[3] + '_' + today + '.pkl'\n\n # save data frame to *.pkl file of same name\n ext_path = os.path.join(path, '1_DataFrames')\n df.to_pickle(os.path.join(ext_path, outfile))\n\ndef convert_comb(files):\n '''Takes a list of *.csv file inputs and creates a single data frames from each of them.\n\n Inputs: Input *.csv files to be combined into a single data frame\n Outputs: A data frame for each input and a single output combined data frame\n '''\n\n dfs = list()\n for file in files:\n ext_path = os.path.join(path, '1_DataFrames')\n df = pd.read_pickle(os.path.join(ext_path, file))\n dfs.append(df)\n\n new_df = merge_df(dfs)\n\n outfile = path_split[1] + '_' + path_split[3] + '_' + today + '.pkl'\n\n # save new data frame under *.pkl file\n ext_path = os.path.join(path, '1_DataFrames')\n new_df.to_pickle(os.path.join(ext_path, outfile))\n\n return new_df\n\ndef sort_df(df, field='tweet_time'):\n ''' Takes a Pandas data frame of Twitter data and sorts by a specified field to prep for data frame parsing steps. It\n also adds a column called \"unique_id_ida\" with formatted ID numbers for each tweet.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Field in data frame to be sorted (OPTIONAL: tweet_time, aka sort by date, is the default)\n Outputs: *.pkl file in the input directory with \"sorted\" label sorted, containing additional column \"unique_id_ida\"\n with 7-digit ID numbers beginning with 0000000 (i.e. 
1234567, 0000134)\n '''\n\n # turn any time fields into datetime objects\n if field == 'tweet_time':\n df['tweet_time'] = pd.to_datetime(df.tweet_time)\n\n # sort data frame by field column\n df = df.sort_values(by=field)\n\n # generate list of unique ID numbers with 7-digits (leading zeros for smaller numbers)\n in_list = list()\n for i in range(0, len(df.index)):\n i = str(i)\n while len(i) != 7:\n i = \"0\" + i\n in_list.append(i)\n\n # add column ID numbers to data frame\n df['unique_id_ida'] = np.array(in_list)\n\n outfile = path_split[1] + '_' + path_split[3] + '_sorted_' + today + '.pkl'\n\n # save new data frame under *.pkl file\n ext_path = os.path.join(path, '1_DataFrames')\n df.to_pickle(os.path.join(ext_path, outfile))\n\n return df\n\ndef split_df(df, num=30):\n '''Takes an input data frame and creates inventories for that data frame, sorted by date. First automatically calls\n the sort function to sort the data frame by the default (date).\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Number of inventories to split into. Users discresction depending on size of the data set.\n Outputs: Directory of inventories containing Tweet content and metadata. These inventories are divided in such a\n way to keep them between 130 and 160 MB, labelled with alphabetical characters to order them. They are\n sorted by date with the ranges of dates in each inventory in the file name\n (i.e. AA_Twitter10M_090509_130214.csv)\n '''\n\n df = sort_df(df)\n\n alphabets = string.ascii_lowercase\n a_list = list()\n for i in alphabets:\n for j in alphabets:\n a_list.append(i.upper() + j.upper())\n\n # splits data set into 30 different data frames of equal size, which will each represent an individual inventory.\n df_split = np.array_split(df, num) \n subpath = os.path.join(path, '2_Inventories')\n\n alpha_index = 0\n last_i = len(df_split) - 1\n\n for item in df_split:\n\n df_sub = pd.concat([item.head(1), item.tail(1)])\n date_bounds = pd.Series(df_sub['tweet_time'].tolist())\n date_bounds_format = (date_bounds.dt.strftime('%Y%m%d')).tolist()\n\n to_file = item[item.tweet_time.dt.strftime('%Y%m%d') != date_bounds[1].strftime('%Y%m%d')]\n\n if alpha_index == 0:\n comb_df = to_file\n elif alpha_index == last_i:\n comb_df = pd.concat([extra_rows, item], axis=0)\n else:\n comb_df = pd.concat([extra_rows, to_file], axis=0)\n\n prevdate = str(int(date_bounds_format[1]) - 1)\n filename = a_list[alpha_index] + '_' + path_split[1] + '_' + path_split[3] + '_' + date_bounds_format[0][2:] + '_' + prevdate[2:] + '.csv'\n print(filename)\n\n filepath = os.path.join(subpath,filename)\n comb_df.to_csv(filepath)\n\n extra_rows = item[item.tweet_time.dt.strftime('%Y%m%d') == date_bounds[1].strftime('%Y%m%d')]\n alpha_index += 1\n\n\ndef get_lang(df, lang):\n '''Takes an input data frame and generates a data frame with only a specific language's tweets (user specified).\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Language code for language of interest\n Outputs: Pandas data frame with a subset of tweets from that specific language\n '''\n lang_df = df.loc[df['tweet_language'] == lang]\n\n outfile = path_split[1] + '_' + path_split[3] + '_sorted_' + lang + '_' + today + '.pkl'\n\n # save new data frame under *.pkl file\n ext_path = os.path.join(path, '1_DataFrames')\n lang_df.to_pickle(os.path.join(ext_path, outfile))\n\n return lang_df\n\n\ndef strip_formatting(df, lim, lang='allLang'):\n '''Takes an imput data frame and removes emojis, punctuation, HTML entities like &, links, 
handles, and emojis.\n Then based on a user specified character limit, it removes the tweets that are below that limit and returns the sub-\n data frame.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Character limit for parsing after strip functionality implemented\n Language label user provides for file naming (if it is a data frame describing a particular language)\n Outputs: Pandas data frame with subset of tweets that satisfied the character limit after removind entities\n of interest\n '''\n\n tweets = df['tweet_text'].to_list()\n\n edit_tweets = list()\n include = list()\n\n for tweet in tweets:\n\n strip_tweet = strip_accounts(remove_punctuation(strip_html_entities(strip_links(strip_emoji(tweet)))))\n\n edit_tweets.append(strip_tweet)\n\n if is_length(strip_tweet, lim):\n include.append('1')\n else:\n include.append('0')\n\n df['stripped_tweet'] = edit_tweets\n df['tweet_length'] = df['tweet_text'].str.len()\n df['include_topic_model'] = include\n df['stripped_tweet_length'] = df['include_topic_model'].str.len()\n\n sub_df = df.loc[df['include_topic_model'] == '1']\n\n outfile = path_split[1] + '_' + path_split[3] + '_sorted_strip_' + lang + '_' + today + '.pkl'\n\n # save new data frame under *.pkl file\n ext_path = os.path.join(path, '1_DataFrames')\n sub_df.to_pickle(os.path.join(ext_path, outfile))\n\n return sub_df\n\ndef extract_content(df, label='All_Languages'):\n '''Takes an input data frame and extracts the individual Tweets and places it in chronological directories\n incremented by intervals based on a month.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Language label user provides for file naming (if it is a data frame describing a particular language)\n Outputs: Directories of binned tweets by month. Each tweet is in its own text file with the stripped tweet content\n only in the file. 
Each file is named accordingly like the following example:\n '''\n\n date_bounds = pd.Series(df['tweet_time'].tolist())\n date_bounds_ymd = (date_bounds.dt.strftime('%Y%m%d')).tolist()\n date_bounds_hms = (date_bounds.dt.strftime('%H%M')).tolist()\n content = pd.Series(df['stripped_tweet'].tolist())\n unid = pd.Series(df['unique_id_ida'].tolist())\n print('Total Files to process: ' + str(len(date_bounds_ymd)))\n\n parentdir = os.path.join(path, label)\n os.mkdir(parentdir)\n\n for i in range(0, len(date_bounds_ymd)):\n dir = date_bounds_ymd[i][:4] + '-' + date_bounds_ymd[i][4:6]\n fulldir = os.path.join(parentdir, dir)\n filename = str(unid[i]) + '_' + path_split[1] + '_' + path_split[3] + '_' + date_bounds_ymd[i][2:] + '_' + date_bounds_hms[i][:4] + '.txt'\n\n outpath = os.path.join(fulldir, filename)\n\n if os.path.exists(outpath):\n pass\n else:\n if os.path.isdir(fulldir):\n pass\n else:\n os.mkdir(fulldir)\n\n if int(i) % 10000 == 0:\n\n print('Files up to ' + str(i) + ' processed.')\n\n f = open(outpath, 'w', encoding='utf-8')\n f.write(content[i])\n f.close()\n\ndef generate_freq(df):\n '''Takes an input data frame and generates a histogram of number of tweets binned by month.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Input parameter called \"increment\", which determined by what time interval the tweets are organized\n Outputs: Histogram\n '''\n\n date_bounds = pd.Series(df['tweet_time'].tolist())\n date_bounds_ym = (date_bounds.dt.strftime('%Y-%m')).tolist()\n df['date_md'] = np.array(date_bounds_ym)\n\n sort = df.sort_values(by=['date_md'])\n\n frq = sort['date_md'].value_counts().to_dict()\n frq_df = sort['date_md'].value_counts()\n\n od = collections.OrderedDict(sorted(frq.items()))\n rf_dates = list()\n\n for item in list(od.keys()):\n date_rf = date_reformat(item)\n rf_dates.append(date_rf)\n\n data = {\"Date\": rf_dates, \"Freq\": list(od.values())}\n graph_frame = pd.dataframe(data=data)\n frq_df.to_csv(os.path.join(path, 'tweet_freq_' + today + '.csv'))\n\n ax = graph_frame.plot.bar(x=\"Date\", y=\"Freq\", rot=45)\n\n plt.show()\n\n\ndef main():\n print('Start time: ' + str(datetime.now()))\n\n infile = 'Twitter_Russia_1906_sorted_strip_en_200929.pkl'\n inpath = os.path.join(path, '1_DataFrames')\n infilepath = os.path.join(inpath, infile)\n\n stripped_en = pd.read_pickle(infilepath)\n print(stripped_en.head()['unique_id_ida'])\n\n # sorted = sort_df(df)\n # split_df(sorted, 1)\n #\n # en_df = get_lang(sorted, 'en')\n # stripped_en = strip_formatting(en_df, 10, 'en')\n #\n # ru_df = get_lang(sorted, 'ru')\n # stripped_ru = strip_formatting(ru_df, 12, 'ru')\n #\n # zh_df = get_lang(sorted, 'zh')\n # stripped_zh = strip_formatting(zh_df, 2, 'zh')\n\n extract_content(stripped_en, 'English')\n\n print('End time: ' + str(datetime.now()))\n\nif __name__ == '__main__':\n main()\n\n# this code is **************************************(U) UNCLASSIFIED***************************************************",
"import os\nimport sys\nimport string\nimport csv\nimport operator\nimport pandas as pd\nfrom datetime import datetime\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport collections\nimport re\nfrom helpers import *\npath = 'E:\\\\Twitter\\\\Russia\\\\Russia_1906'\npath_split = path.split('\\\\')\ntoday = '20' + datetime.now().strftime('%m%d')\n\n\ndef convert_source(infile):\n \"\"\" Imports a CSV file into Python Pandas and outputs a Pandas Data Frame Object. It then saves this object to a\n pkl file for more efficient import of other processes.\n\n Inputs: *.csv file\n Outputs: *.pkl file in the input directory\n \"\"\"\n filepath = os.path.join(path, infile)\n df = pd.read_csv(filepath)\n outfile = path_split[1] + '_' + path_split[3] + '_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n df.to_pickle(os.path.join(ext_path, outfile))\n\n\ndef convert_comb(files):\n \"\"\"Takes a list of *.csv file inputs and creates a single data frames from each of them.\n\n Inputs: Input *.csv files to be combined into a single data frame\n Outputs: A data frame for each input and a single output combined data frame\n \"\"\"\n dfs = list()\n for file in files:\n ext_path = os.path.join(path, '1_DataFrames')\n df = pd.read_pickle(os.path.join(ext_path, file))\n dfs.append(df)\n new_df = merge_df(dfs)\n outfile = path_split[1] + '_' + path_split[3] + '_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n new_df.to_pickle(os.path.join(ext_path, outfile))\n return new_df\n\n\ndef sort_df(df, field='tweet_time'):\n \"\"\" Takes a Pandas data frame of Twitter data and sorts by a specified field to prep for data frame parsing steps. It\n also adds a column called \"unique_id_ida\" with formatted ID numbers for each tweet.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Field in data frame to be sorted (OPTIONAL: tweet_time, aka sort by date, is the default)\n Outputs: *.pkl file in the input directory with \"sorted\" label sorted, containing additional column \"unique_id_ida\"\n with 7-digit ID numbers beginning with 0000000 (i.e. 1234567, 0000134)\n \"\"\"\n if field == 'tweet_time':\n df['tweet_time'] = pd.to_datetime(df.tweet_time)\n df = df.sort_values(by=field)\n in_list = list()\n for i in range(0, len(df.index)):\n i = str(i)\n while len(i) != 7:\n i = '0' + i\n in_list.append(i)\n df['unique_id_ida'] = np.array(in_list)\n outfile = path_split[1] + '_' + path_split[3] + '_sorted_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n df.to_pickle(os.path.join(ext_path, outfile))\n return df\n\n\ndef split_df(df, num=30):\n \"\"\"Takes an input data frame and creates inventories for that data frame, sorted by date. First automatically calls\n the sort function to sort the data frame by the default (date).\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Number of inventories to split into. Users discresction depending on size of the data set.\n Outputs: Directory of inventories containing Tweet content and metadata. These inventories are divided in such a\n way to keep them between 130 and 160 MB, labelled with alphabetical characters to order them. They are\n sorted by date with the ranges of dates in each inventory in the file name\n (i.e. 
AA_Twitter10M_090509_130214.csv)\n \"\"\"\n df = sort_df(df)\n alphabets = string.ascii_lowercase\n a_list = list()\n for i in alphabets:\n for j in alphabets:\n a_list.append(i.upper() + j.upper())\n df_split = np.array_split(df, num)\n subpath = os.path.join(path, '2_Inventories')\n alpha_index = 0\n last_i = len(df_split) - 1\n for item in df_split:\n df_sub = pd.concat([item.head(1), item.tail(1)])\n date_bounds = pd.Series(df_sub['tweet_time'].tolist())\n date_bounds_format = date_bounds.dt.strftime('%Y%m%d').tolist()\n to_file = item[item.tweet_time.dt.strftime('%Y%m%d') != date_bounds\n [1].strftime('%Y%m%d')]\n if alpha_index == 0:\n comb_df = to_file\n elif alpha_index == last_i:\n comb_df = pd.concat([extra_rows, item], axis=0)\n else:\n comb_df = pd.concat([extra_rows, to_file], axis=0)\n prevdate = str(int(date_bounds_format[1]) - 1)\n filename = a_list[alpha_index] + '_' + path_split[1\n ] + '_' + path_split[3] + '_' + date_bounds_format[0][2:\n ] + '_' + prevdate[2:] + '.csv'\n print(filename)\n filepath = os.path.join(subpath, filename)\n comb_df.to_csv(filepath)\n extra_rows = item[item.tweet_time.dt.strftime('%Y%m%d') ==\n date_bounds[1].strftime('%Y%m%d')]\n alpha_index += 1\n\n\ndef get_lang(df, lang):\n \"\"\"Takes an input data frame and generates a data frame with only a specific language's tweets (user specified).\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Language code for language of interest\n Outputs: Pandas data frame with a subset of tweets from that specific language\n \"\"\"\n lang_df = df.loc[df['tweet_language'] == lang]\n outfile = path_split[1] + '_' + path_split[3\n ] + '_sorted_' + lang + '_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n lang_df.to_pickle(os.path.join(ext_path, outfile))\n return lang_df\n\n\ndef strip_formatting(df, lim, lang='allLang'):\n \"\"\"Takes an imput data frame and removes emojis, punctuation, HTML entities like &, links, handles, and emojis.\n Then based on a user specified character limit, it removes the tweets that are below that limit and returns the sub-\n data frame.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Character limit for parsing after strip functionality implemented\n Language label user provides for file naming (if it is a data frame describing a particular language)\n Outputs: Pandas data frame with subset of tweets that satisfied the character limit after removind entities\n of interest\n \"\"\"\n tweets = df['tweet_text'].to_list()\n edit_tweets = list()\n include = list()\n for tweet in tweets:\n strip_tweet = strip_accounts(remove_punctuation(strip_html_entities\n (strip_links(strip_emoji(tweet)))))\n edit_tweets.append(strip_tweet)\n if is_length(strip_tweet, lim):\n include.append('1')\n else:\n include.append('0')\n df['stripped_tweet'] = edit_tweets\n df['tweet_length'] = df['tweet_text'].str.len()\n df['include_topic_model'] = include\n df['stripped_tweet_length'] = df['include_topic_model'].str.len()\n sub_df = df.loc[df['include_topic_model'] == '1']\n outfile = path_split[1] + '_' + path_split[3\n ] + '_sorted_strip_' + lang + '_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n sub_df.to_pickle(os.path.join(ext_path, outfile))\n return sub_df\n\n\ndef extract_content(df, label='All_Languages'):\n \"\"\"Takes an input data frame and extracts the individual Tweets and places it in chronological directories\n incremented by intervals based on a month.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n 
Language label user provides for file naming (if it is a data frame describing a particular language)\n Outputs: Directories of binned tweets by month. Each tweet is in its own text file with the stripped tweet content\n only in the file. Each file is named accordingly like the following example:\n \"\"\"\n date_bounds = pd.Series(df['tweet_time'].tolist())\n date_bounds_ymd = date_bounds.dt.strftime('%Y%m%d').tolist()\n date_bounds_hms = date_bounds.dt.strftime('%H%M').tolist()\n content = pd.Series(df['stripped_tweet'].tolist())\n unid = pd.Series(df['unique_id_ida'].tolist())\n print('Total Files to process: ' + str(len(date_bounds_ymd)))\n parentdir = os.path.join(path, label)\n os.mkdir(parentdir)\n for i in range(0, len(date_bounds_ymd)):\n dir = date_bounds_ymd[i][:4] + '-' + date_bounds_ymd[i][4:6]\n fulldir = os.path.join(parentdir, dir)\n filename = str(unid[i]) + '_' + path_split[1] + '_' + path_split[3\n ] + '_' + date_bounds_ymd[i][2:] + '_' + date_bounds_hms[i][:4\n ] + '.txt'\n outpath = os.path.join(fulldir, filename)\n if os.path.exists(outpath):\n pass\n else:\n if os.path.isdir(fulldir):\n pass\n else:\n os.mkdir(fulldir)\n if int(i) % 10000 == 0:\n print('Files up to ' + str(i) + ' processed.')\n f = open(outpath, 'w', encoding='utf-8')\n f.write(content[i])\n f.close()\n\n\ndef generate_freq(df):\n \"\"\"Takes an input data frame and generates a histogram of number of tweets binned by month.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Input parameter called \"increment\", which determined by what time interval the tweets are organized\n Outputs: Histogram\n \"\"\"\n date_bounds = pd.Series(df['tweet_time'].tolist())\n date_bounds_ym = date_bounds.dt.strftime('%Y-%m').tolist()\n df['date_md'] = np.array(date_bounds_ym)\n sort = df.sort_values(by=['date_md'])\n frq = sort['date_md'].value_counts().to_dict()\n frq_df = sort['date_md'].value_counts()\n od = collections.OrderedDict(sorted(frq.items()))\n rf_dates = list()\n for item in list(od.keys()):\n date_rf = date_reformat(item)\n rf_dates.append(date_rf)\n data = {'Date': rf_dates, 'Freq': list(od.values())}\n graph_frame = pd.dataframe(data=data)\n frq_df.to_csv(os.path.join(path, 'tweet_freq_' + today + '.csv'))\n ax = graph_frame.plot.bar(x='Date', y='Freq', rot=45)\n plt.show()\n\n\ndef main():\n print('Start time: ' + str(datetime.now()))\n infile = 'Twitter_Russia_1906_sorted_strip_en_200929.pkl'\n inpath = os.path.join(path, '1_DataFrames')\n infilepath = os.path.join(inpath, infile)\n stripped_en = pd.read_pickle(infilepath)\n print(stripped_en.head()['unique_id_ida'])\n extract_content(stripped_en, 'English')\n print('End time: ' + str(datetime.now()))\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\npath = 'E:\\\\Twitter\\\\Russia\\\\Russia_1906'\npath_split = path.split('\\\\')\ntoday = '20' + datetime.now().strftime('%m%d')\n\n\ndef convert_source(infile):\n \"\"\" Imports a CSV file into Python Pandas and outputs a Pandas Data Frame Object. It then saves this object to a\n pkl file for more efficient import of other processes.\n\n Inputs: *.csv file\n Outputs: *.pkl file in the input directory\n \"\"\"\n filepath = os.path.join(path, infile)\n df = pd.read_csv(filepath)\n outfile = path_split[1] + '_' + path_split[3] + '_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n df.to_pickle(os.path.join(ext_path, outfile))\n\n\ndef convert_comb(files):\n \"\"\"Takes a list of *.csv file inputs and creates a single data frames from each of them.\n\n Inputs: Input *.csv files to be combined into a single data frame\n Outputs: A data frame for each input and a single output combined data frame\n \"\"\"\n dfs = list()\n for file in files:\n ext_path = os.path.join(path, '1_DataFrames')\n df = pd.read_pickle(os.path.join(ext_path, file))\n dfs.append(df)\n new_df = merge_df(dfs)\n outfile = path_split[1] + '_' + path_split[3] + '_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n new_df.to_pickle(os.path.join(ext_path, outfile))\n return new_df\n\n\ndef sort_df(df, field='tweet_time'):\n \"\"\" Takes a Pandas data frame of Twitter data and sorts by a specified field to prep for data frame parsing steps. It\n also adds a column called \"unique_id_ida\" with formatted ID numbers for each tweet.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Field in data frame to be sorted (OPTIONAL: tweet_time, aka sort by date, is the default)\n Outputs: *.pkl file in the input directory with \"sorted\" label sorted, containing additional column \"unique_id_ida\"\n with 7-digit ID numbers beginning with 0000000 (i.e. 1234567, 0000134)\n \"\"\"\n if field == 'tweet_time':\n df['tweet_time'] = pd.to_datetime(df.tweet_time)\n df = df.sort_values(by=field)\n in_list = list()\n for i in range(0, len(df.index)):\n i = str(i)\n while len(i) != 7:\n i = '0' + i\n in_list.append(i)\n df['unique_id_ida'] = np.array(in_list)\n outfile = path_split[1] + '_' + path_split[3] + '_sorted_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n df.to_pickle(os.path.join(ext_path, outfile))\n return df\n\n\ndef split_df(df, num=30):\n \"\"\"Takes an input data frame and creates inventories for that data frame, sorted by date. First automatically calls\n the sort function to sort the data frame by the default (date).\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Number of inventories to split into. Users discresction depending on size of the data set.\n Outputs: Directory of inventories containing Tweet content and metadata. These inventories are divided in such a\n way to keep them between 130 and 160 MB, labelled with alphabetical characters to order them. They are\n sorted by date with the ranges of dates in each inventory in the file name\n (i.e. 
AA_Twitter10M_090509_130214.csv)\n \"\"\"\n df = sort_df(df)\n alphabets = string.ascii_lowercase\n a_list = list()\n for i in alphabets:\n for j in alphabets:\n a_list.append(i.upper() + j.upper())\n df_split = np.array_split(df, num)\n subpath = os.path.join(path, '2_Inventories')\n alpha_index = 0\n last_i = len(df_split) - 1\n for item in df_split:\n df_sub = pd.concat([item.head(1), item.tail(1)])\n date_bounds = pd.Series(df_sub['tweet_time'].tolist())\n date_bounds_format = date_bounds.dt.strftime('%Y%m%d').tolist()\n to_file = item[item.tweet_time.dt.strftime('%Y%m%d') != date_bounds\n [1].strftime('%Y%m%d')]\n if alpha_index == 0:\n comb_df = to_file\n elif alpha_index == last_i:\n comb_df = pd.concat([extra_rows, item], axis=0)\n else:\n comb_df = pd.concat([extra_rows, to_file], axis=0)\n prevdate = str(int(date_bounds_format[1]) - 1)\n filename = a_list[alpha_index] + '_' + path_split[1\n ] + '_' + path_split[3] + '_' + date_bounds_format[0][2:\n ] + '_' + prevdate[2:] + '.csv'\n print(filename)\n filepath = os.path.join(subpath, filename)\n comb_df.to_csv(filepath)\n extra_rows = item[item.tweet_time.dt.strftime('%Y%m%d') ==\n date_bounds[1].strftime('%Y%m%d')]\n alpha_index += 1\n\n\ndef get_lang(df, lang):\n \"\"\"Takes an input data frame and generates a data frame with only a specific language's tweets (user specified).\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Language code for language of interest\n Outputs: Pandas data frame with a subset of tweets from that specific language\n \"\"\"\n lang_df = df.loc[df['tweet_language'] == lang]\n outfile = path_split[1] + '_' + path_split[3\n ] + '_sorted_' + lang + '_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n lang_df.to_pickle(os.path.join(ext_path, outfile))\n return lang_df\n\n\ndef strip_formatting(df, lim, lang='allLang'):\n \"\"\"Takes an imput data frame and removes emojis, punctuation, HTML entities like &, links, handles, and emojis.\n Then based on a user specified character limit, it removes the tweets that are below that limit and returns the sub-\n data frame.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Character limit for parsing after strip functionality implemented\n Language label user provides for file naming (if it is a data frame describing a particular language)\n Outputs: Pandas data frame with subset of tweets that satisfied the character limit after removind entities\n of interest\n \"\"\"\n tweets = df['tweet_text'].to_list()\n edit_tweets = list()\n include = list()\n for tweet in tweets:\n strip_tweet = strip_accounts(remove_punctuation(strip_html_entities\n (strip_links(strip_emoji(tweet)))))\n edit_tweets.append(strip_tweet)\n if is_length(strip_tweet, lim):\n include.append('1')\n else:\n include.append('0')\n df['stripped_tweet'] = edit_tweets\n df['tweet_length'] = df['tweet_text'].str.len()\n df['include_topic_model'] = include\n df['stripped_tweet_length'] = df['include_topic_model'].str.len()\n sub_df = df.loc[df['include_topic_model'] == '1']\n outfile = path_split[1] + '_' + path_split[3\n ] + '_sorted_strip_' + lang + '_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n sub_df.to_pickle(os.path.join(ext_path, outfile))\n return sub_df\n\n\ndef extract_content(df, label='All_Languages'):\n \"\"\"Takes an input data frame and extracts the individual Tweets and places it in chronological directories\n incremented by intervals based on a month.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n 
Language label user provides for file naming (if it is a data frame describing a particular language)\n Outputs: Directories of binned tweets by month. Each tweet is in its own text file with the stripped tweet content\n only in the file. Each file is named accordingly like the following example:\n \"\"\"\n date_bounds = pd.Series(df['tweet_time'].tolist())\n date_bounds_ymd = date_bounds.dt.strftime('%Y%m%d').tolist()\n date_bounds_hms = date_bounds.dt.strftime('%H%M').tolist()\n content = pd.Series(df['stripped_tweet'].tolist())\n unid = pd.Series(df['unique_id_ida'].tolist())\n print('Total Files to process: ' + str(len(date_bounds_ymd)))\n parentdir = os.path.join(path, label)\n os.mkdir(parentdir)\n for i in range(0, len(date_bounds_ymd)):\n dir = date_bounds_ymd[i][:4] + '-' + date_bounds_ymd[i][4:6]\n fulldir = os.path.join(parentdir, dir)\n filename = str(unid[i]) + '_' + path_split[1] + '_' + path_split[3\n ] + '_' + date_bounds_ymd[i][2:] + '_' + date_bounds_hms[i][:4\n ] + '.txt'\n outpath = os.path.join(fulldir, filename)\n if os.path.exists(outpath):\n pass\n else:\n if os.path.isdir(fulldir):\n pass\n else:\n os.mkdir(fulldir)\n if int(i) % 10000 == 0:\n print('Files up to ' + str(i) + ' processed.')\n f = open(outpath, 'w', encoding='utf-8')\n f.write(content[i])\n f.close()\n\n\ndef generate_freq(df):\n \"\"\"Takes an input data frame and generates a histogram of number of tweets binned by month.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Input parameter called \"increment\", which determined by what time interval the tweets are organized\n Outputs: Histogram\n \"\"\"\n date_bounds = pd.Series(df['tweet_time'].tolist())\n date_bounds_ym = date_bounds.dt.strftime('%Y-%m').tolist()\n df['date_md'] = np.array(date_bounds_ym)\n sort = df.sort_values(by=['date_md'])\n frq = sort['date_md'].value_counts().to_dict()\n frq_df = sort['date_md'].value_counts()\n od = collections.OrderedDict(sorted(frq.items()))\n rf_dates = list()\n for item in list(od.keys()):\n date_rf = date_reformat(item)\n rf_dates.append(date_rf)\n data = {'Date': rf_dates, 'Freq': list(od.values())}\n graph_frame = pd.dataframe(data=data)\n frq_df.to_csv(os.path.join(path, 'tweet_freq_' + today + '.csv'))\n ax = graph_frame.plot.bar(x='Date', y='Freq', rot=45)\n plt.show()\n\n\ndef main():\n print('Start time: ' + str(datetime.now()))\n infile = 'Twitter_Russia_1906_sorted_strip_en_200929.pkl'\n inpath = os.path.join(path, '1_DataFrames')\n infilepath = os.path.join(inpath, infile)\n stripped_en = pd.read_pickle(infilepath)\n print(stripped_en.head()['unique_id_ida'])\n extract_content(stripped_en, 'English')\n print('End time: ' + str(datetime.now()))\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n<assignment token>\n\n\ndef convert_source(infile):\n \"\"\" Imports a CSV file into Python Pandas and outputs a Pandas Data Frame Object. It then saves this object to a\n pkl file for more efficient import of other processes.\n\n Inputs: *.csv file\n Outputs: *.pkl file in the input directory\n \"\"\"\n filepath = os.path.join(path, infile)\n df = pd.read_csv(filepath)\n outfile = path_split[1] + '_' + path_split[3] + '_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n df.to_pickle(os.path.join(ext_path, outfile))\n\n\ndef convert_comb(files):\n \"\"\"Takes a list of *.csv file inputs and creates a single data frames from each of them.\n\n Inputs: Input *.csv files to be combined into a single data frame\n Outputs: A data frame for each input and a single output combined data frame\n \"\"\"\n dfs = list()\n for file in files:\n ext_path = os.path.join(path, '1_DataFrames')\n df = pd.read_pickle(os.path.join(ext_path, file))\n dfs.append(df)\n new_df = merge_df(dfs)\n outfile = path_split[1] + '_' + path_split[3] + '_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n new_df.to_pickle(os.path.join(ext_path, outfile))\n return new_df\n\n\ndef sort_df(df, field='tweet_time'):\n \"\"\" Takes a Pandas data frame of Twitter data and sorts by a specified field to prep for data frame parsing steps. It\n also adds a column called \"unique_id_ida\" with formatted ID numbers for each tweet.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Field in data frame to be sorted (OPTIONAL: tweet_time, aka sort by date, is the default)\n Outputs: *.pkl file in the input directory with \"sorted\" label sorted, containing additional column \"unique_id_ida\"\n with 7-digit ID numbers beginning with 0000000 (i.e. 1234567, 0000134)\n \"\"\"\n if field == 'tweet_time':\n df['tweet_time'] = pd.to_datetime(df.tweet_time)\n df = df.sort_values(by=field)\n in_list = list()\n for i in range(0, len(df.index)):\n i = str(i)\n while len(i) != 7:\n i = '0' + i\n in_list.append(i)\n df['unique_id_ida'] = np.array(in_list)\n outfile = path_split[1] + '_' + path_split[3] + '_sorted_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n df.to_pickle(os.path.join(ext_path, outfile))\n return df\n\n\ndef split_df(df, num=30):\n \"\"\"Takes an input data frame and creates inventories for that data frame, sorted by date. First automatically calls\n the sort function to sort the data frame by the default (date).\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Number of inventories to split into. Users discresction depending on size of the data set.\n Outputs: Directory of inventories containing Tweet content and metadata. These inventories are divided in such a\n way to keep them between 130 and 160 MB, labelled with alphabetical characters to order them. They are\n sorted by date with the ranges of dates in each inventory in the file name\n (i.e. 
AA_Twitter10M_090509_130214.csv)\n \"\"\"\n df = sort_df(df)\n alphabets = string.ascii_lowercase\n a_list = list()\n for i in alphabets:\n for j in alphabets:\n a_list.append(i.upper() + j.upper())\n df_split = np.array_split(df, num)\n subpath = os.path.join(path, '2_Inventories')\n alpha_index = 0\n last_i = len(df_split) - 1\n for item in df_split:\n df_sub = pd.concat([item.head(1), item.tail(1)])\n date_bounds = pd.Series(df_sub['tweet_time'].tolist())\n date_bounds_format = date_bounds.dt.strftime('%Y%m%d').tolist()\n to_file = item[item.tweet_time.dt.strftime('%Y%m%d') != date_bounds\n [1].strftime('%Y%m%d')]\n if alpha_index == 0:\n comb_df = to_file\n elif alpha_index == last_i:\n comb_df = pd.concat([extra_rows, item], axis=0)\n else:\n comb_df = pd.concat([extra_rows, to_file], axis=0)\n prevdate = str(int(date_bounds_format[1]) - 1)\n filename = a_list[alpha_index] + '_' + path_split[1\n ] + '_' + path_split[3] + '_' + date_bounds_format[0][2:\n ] + '_' + prevdate[2:] + '.csv'\n print(filename)\n filepath = os.path.join(subpath, filename)\n comb_df.to_csv(filepath)\n extra_rows = item[item.tweet_time.dt.strftime('%Y%m%d') ==\n date_bounds[1].strftime('%Y%m%d')]\n alpha_index += 1\n\n\ndef get_lang(df, lang):\n \"\"\"Takes an input data frame and generates a data frame with only a specific language's tweets (user specified).\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Language code for language of interest\n Outputs: Pandas data frame with a subset of tweets from that specific language\n \"\"\"\n lang_df = df.loc[df['tweet_language'] == lang]\n outfile = path_split[1] + '_' + path_split[3\n ] + '_sorted_' + lang + '_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n lang_df.to_pickle(os.path.join(ext_path, outfile))\n return lang_df\n\n\ndef strip_formatting(df, lim, lang='allLang'):\n \"\"\"Takes an imput data frame and removes emojis, punctuation, HTML entities like &, links, handles, and emojis.\n Then based on a user specified character limit, it removes the tweets that are below that limit and returns the sub-\n data frame.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Character limit for parsing after strip functionality implemented\n Language label user provides for file naming (if it is a data frame describing a particular language)\n Outputs: Pandas data frame with subset of tweets that satisfied the character limit after removind entities\n of interest\n \"\"\"\n tweets = df['tweet_text'].to_list()\n edit_tweets = list()\n include = list()\n for tweet in tweets:\n strip_tweet = strip_accounts(remove_punctuation(strip_html_entities\n (strip_links(strip_emoji(tweet)))))\n edit_tweets.append(strip_tweet)\n if is_length(strip_tweet, lim):\n include.append('1')\n else:\n include.append('0')\n df['stripped_tweet'] = edit_tweets\n df['tweet_length'] = df['tweet_text'].str.len()\n df['include_topic_model'] = include\n df['stripped_tweet_length'] = df['include_topic_model'].str.len()\n sub_df = df.loc[df['include_topic_model'] == '1']\n outfile = path_split[1] + '_' + path_split[3\n ] + '_sorted_strip_' + lang + '_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n sub_df.to_pickle(os.path.join(ext_path, outfile))\n return sub_df\n\n\ndef extract_content(df, label='All_Languages'):\n \"\"\"Takes an input data frame and extracts the individual Tweets and places it in chronological directories\n incremented by intervals based on a month.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n 
Language label user provides for file naming (if it is a data frame describing a particular language)\n Outputs: Directories of binned tweets by month. Each tweet is in its own text file with the stripped tweet content\n only in the file. Each file is named accordingly like the following example:\n \"\"\"\n date_bounds = pd.Series(df['tweet_time'].tolist())\n date_bounds_ymd = date_bounds.dt.strftime('%Y%m%d').tolist()\n date_bounds_hms = date_bounds.dt.strftime('%H%M').tolist()\n content = pd.Series(df['stripped_tweet'].tolist())\n unid = pd.Series(df['unique_id_ida'].tolist())\n print('Total Files to process: ' + str(len(date_bounds_ymd)))\n parentdir = os.path.join(path, label)\n os.mkdir(parentdir)\n for i in range(0, len(date_bounds_ymd)):\n dir = date_bounds_ymd[i][:4] + '-' + date_bounds_ymd[i][4:6]\n fulldir = os.path.join(parentdir, dir)\n filename = str(unid[i]) + '_' + path_split[1] + '_' + path_split[3\n ] + '_' + date_bounds_ymd[i][2:] + '_' + date_bounds_hms[i][:4\n ] + '.txt'\n outpath = os.path.join(fulldir, filename)\n if os.path.exists(outpath):\n pass\n else:\n if os.path.isdir(fulldir):\n pass\n else:\n os.mkdir(fulldir)\n if int(i) % 10000 == 0:\n print('Files up to ' + str(i) + ' processed.')\n f = open(outpath, 'w', encoding='utf-8')\n f.write(content[i])\n f.close()\n\n\ndef generate_freq(df):\n \"\"\"Takes an input data frame and generates a histogram of number of tweets binned by month.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Input parameter called \"increment\", which determined by what time interval the tweets are organized\n Outputs: Histogram\n \"\"\"\n date_bounds = pd.Series(df['tweet_time'].tolist())\n date_bounds_ym = date_bounds.dt.strftime('%Y-%m').tolist()\n df['date_md'] = np.array(date_bounds_ym)\n sort = df.sort_values(by=['date_md'])\n frq = sort['date_md'].value_counts().to_dict()\n frq_df = sort['date_md'].value_counts()\n od = collections.OrderedDict(sorted(frq.items()))\n rf_dates = list()\n for item in list(od.keys()):\n date_rf = date_reformat(item)\n rf_dates.append(date_rf)\n data = {'Date': rf_dates, 'Freq': list(od.values())}\n graph_frame = pd.dataframe(data=data)\n frq_df.to_csv(os.path.join(path, 'tweet_freq_' + today + '.csv'))\n ax = graph_frame.plot.bar(x='Date', y='Freq', rot=45)\n plt.show()\n\n\ndef main():\n print('Start time: ' + str(datetime.now()))\n infile = 'Twitter_Russia_1906_sorted_strip_en_200929.pkl'\n inpath = os.path.join(path, '1_DataFrames')\n infilepath = os.path.join(inpath, infile)\n stripped_en = pd.read_pickle(infilepath)\n print(stripped_en.head()['unique_id_ida'])\n extract_content(stripped_en, 'English')\n print('End time: ' + str(datetime.now()))\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n<assignment token>\n\n\ndef convert_source(infile):\n \"\"\" Imports a CSV file into Python Pandas and outputs a Pandas Data Frame Object. It then saves this object to a\n pkl file for more efficient import of other processes.\n\n Inputs: *.csv file\n Outputs: *.pkl file in the input directory\n \"\"\"\n filepath = os.path.join(path, infile)\n df = pd.read_csv(filepath)\n outfile = path_split[1] + '_' + path_split[3] + '_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n df.to_pickle(os.path.join(ext_path, outfile))\n\n\ndef convert_comb(files):\n \"\"\"Takes a list of *.csv file inputs and creates a single data frames from each of them.\n\n Inputs: Input *.csv files to be combined into a single data frame\n Outputs: A data frame for each input and a single output combined data frame\n \"\"\"\n dfs = list()\n for file in files:\n ext_path = os.path.join(path, '1_DataFrames')\n df = pd.read_pickle(os.path.join(ext_path, file))\n dfs.append(df)\n new_df = merge_df(dfs)\n outfile = path_split[1] + '_' + path_split[3] + '_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n new_df.to_pickle(os.path.join(ext_path, outfile))\n return new_df\n\n\ndef sort_df(df, field='tweet_time'):\n \"\"\" Takes a Pandas data frame of Twitter data and sorts by a specified field to prep for data frame parsing steps. It\n also adds a column called \"unique_id_ida\" with formatted ID numbers for each tweet.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Field in data frame to be sorted (OPTIONAL: tweet_time, aka sort by date, is the default)\n Outputs: *.pkl file in the input directory with \"sorted\" label sorted, containing additional column \"unique_id_ida\"\n with 7-digit ID numbers beginning with 0000000 (i.e. 1234567, 0000134)\n \"\"\"\n if field == 'tweet_time':\n df['tweet_time'] = pd.to_datetime(df.tweet_time)\n df = df.sort_values(by=field)\n in_list = list()\n for i in range(0, len(df.index)):\n i = str(i)\n while len(i) != 7:\n i = '0' + i\n in_list.append(i)\n df['unique_id_ida'] = np.array(in_list)\n outfile = path_split[1] + '_' + path_split[3] + '_sorted_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n df.to_pickle(os.path.join(ext_path, outfile))\n return df\n\n\ndef split_df(df, num=30):\n \"\"\"Takes an input data frame and creates inventories for that data frame, sorted by date. First automatically calls\n the sort function to sort the data frame by the default (date).\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Number of inventories to split into. Users discresction depending on size of the data set.\n Outputs: Directory of inventories containing Tweet content and metadata. These inventories are divided in such a\n way to keep them between 130 and 160 MB, labelled with alphabetical characters to order them. They are\n sorted by date with the ranges of dates in each inventory in the file name\n (i.e. 
AA_Twitter10M_090509_130214.csv)\n \"\"\"\n df = sort_df(df)\n alphabets = string.ascii_lowercase\n a_list = list()\n for i in alphabets:\n for j in alphabets:\n a_list.append(i.upper() + j.upper())\n df_split = np.array_split(df, num)\n subpath = os.path.join(path, '2_Inventories')\n alpha_index = 0\n last_i = len(df_split) - 1\n for item in df_split:\n df_sub = pd.concat([item.head(1), item.tail(1)])\n date_bounds = pd.Series(df_sub['tweet_time'].tolist())\n date_bounds_format = date_bounds.dt.strftime('%Y%m%d').tolist()\n to_file = item[item.tweet_time.dt.strftime('%Y%m%d') != date_bounds\n [1].strftime('%Y%m%d')]\n if alpha_index == 0:\n comb_df = to_file\n elif alpha_index == last_i:\n comb_df = pd.concat([extra_rows, item], axis=0)\n else:\n comb_df = pd.concat([extra_rows, to_file], axis=0)\n prevdate = str(int(date_bounds_format[1]) - 1)\n filename = a_list[alpha_index] + '_' + path_split[1\n ] + '_' + path_split[3] + '_' + date_bounds_format[0][2:\n ] + '_' + prevdate[2:] + '.csv'\n print(filename)\n filepath = os.path.join(subpath, filename)\n comb_df.to_csv(filepath)\n extra_rows = item[item.tweet_time.dt.strftime('%Y%m%d') ==\n date_bounds[1].strftime('%Y%m%d')]\n alpha_index += 1\n\n\ndef get_lang(df, lang):\n \"\"\"Takes an input data frame and generates a data frame with only a specific language's tweets (user specified).\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Language code for language of interest\n Outputs: Pandas data frame with a subset of tweets from that specific language\n \"\"\"\n lang_df = df.loc[df['tweet_language'] == lang]\n outfile = path_split[1] + '_' + path_split[3\n ] + '_sorted_' + lang + '_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n lang_df.to_pickle(os.path.join(ext_path, outfile))\n return lang_df\n\n\ndef strip_formatting(df, lim, lang='allLang'):\n \"\"\"Takes an imput data frame and removes emojis, punctuation, HTML entities like &, links, handles, and emojis.\n Then based on a user specified character limit, it removes the tweets that are below that limit and returns the sub-\n data frame.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Character limit for parsing after strip functionality implemented\n Language label user provides for file naming (if it is a data frame describing a particular language)\n Outputs: Pandas data frame with subset of tweets that satisfied the character limit after removind entities\n of interest\n \"\"\"\n tweets = df['tweet_text'].to_list()\n edit_tweets = list()\n include = list()\n for tweet in tweets:\n strip_tweet = strip_accounts(remove_punctuation(strip_html_entities\n (strip_links(strip_emoji(tweet)))))\n edit_tweets.append(strip_tweet)\n if is_length(strip_tweet, lim):\n include.append('1')\n else:\n include.append('0')\n df['stripped_tweet'] = edit_tweets\n df['tweet_length'] = df['tweet_text'].str.len()\n df['include_topic_model'] = include\n df['stripped_tweet_length'] = df['include_topic_model'].str.len()\n sub_df = df.loc[df['include_topic_model'] == '1']\n outfile = path_split[1] + '_' + path_split[3\n ] + '_sorted_strip_' + lang + '_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n sub_df.to_pickle(os.path.join(ext_path, outfile))\n return sub_df\n\n\ndef extract_content(df, label='All_Languages'):\n \"\"\"Takes an input data frame and extracts the individual Tweets and places it in chronological directories\n incremented by intervals based on a month.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n 
Language label user provides for file naming (if it is a data frame describing a particular language)\n Outputs: Directories of binned tweets by month. Each tweet is in its own text file with the stripped tweet content\n only in the file. Each file is named accordingly like the following example:\n \"\"\"\n date_bounds = pd.Series(df['tweet_time'].tolist())\n date_bounds_ymd = date_bounds.dt.strftime('%Y%m%d').tolist()\n date_bounds_hms = date_bounds.dt.strftime('%H%M').tolist()\n content = pd.Series(df['stripped_tweet'].tolist())\n unid = pd.Series(df['unique_id_ida'].tolist())\n print('Total Files to process: ' + str(len(date_bounds_ymd)))\n parentdir = os.path.join(path, label)\n os.mkdir(parentdir)\n for i in range(0, len(date_bounds_ymd)):\n dir = date_bounds_ymd[i][:4] + '-' + date_bounds_ymd[i][4:6]\n fulldir = os.path.join(parentdir, dir)\n filename = str(unid[i]) + '_' + path_split[1] + '_' + path_split[3\n ] + '_' + date_bounds_ymd[i][2:] + '_' + date_bounds_hms[i][:4\n ] + '.txt'\n outpath = os.path.join(fulldir, filename)\n if os.path.exists(outpath):\n pass\n else:\n if os.path.isdir(fulldir):\n pass\n else:\n os.mkdir(fulldir)\n if int(i) % 10000 == 0:\n print('Files up to ' + str(i) + ' processed.')\n f = open(outpath, 'w', encoding='utf-8')\n f.write(content[i])\n f.close()\n\n\ndef generate_freq(df):\n \"\"\"Takes an input data frame and generates a histogram of number of tweets binned by month.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Input parameter called \"increment\", which determined by what time interval the tweets are organized\n Outputs: Histogram\n \"\"\"\n date_bounds = pd.Series(df['tweet_time'].tolist())\n date_bounds_ym = date_bounds.dt.strftime('%Y-%m').tolist()\n df['date_md'] = np.array(date_bounds_ym)\n sort = df.sort_values(by=['date_md'])\n frq = sort['date_md'].value_counts().to_dict()\n frq_df = sort['date_md'].value_counts()\n od = collections.OrderedDict(sorted(frq.items()))\n rf_dates = list()\n for item in list(od.keys()):\n date_rf = date_reformat(item)\n rf_dates.append(date_rf)\n data = {'Date': rf_dates, 'Freq': list(od.values())}\n graph_frame = pd.dataframe(data=data)\n frq_df.to_csv(os.path.join(path, 'tweet_freq_' + today + '.csv'))\n ax = graph_frame.plot.bar(x='Date', y='Freq', rot=45)\n plt.show()\n\n\ndef main():\n print('Start time: ' + str(datetime.now()))\n infile = 'Twitter_Russia_1906_sorted_strip_en_200929.pkl'\n inpath = os.path.join(path, '1_DataFrames')\n infilepath = os.path.join(inpath, infile)\n stripped_en = pd.read_pickle(infilepath)\n print(stripped_en.head()['unique_id_ida'])\n extract_content(stripped_en, 'English')\n print('End time: ' + str(datetime.now()))\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef convert_source(infile):\n \"\"\" Imports a CSV file into Python Pandas and outputs a Pandas Data Frame Object. It then saves this object to a\n pkl file for more efficient import of other processes.\n\n Inputs: *.csv file\n Outputs: *.pkl file in the input directory\n \"\"\"\n filepath = os.path.join(path, infile)\n df = pd.read_csv(filepath)\n outfile = path_split[1] + '_' + path_split[3] + '_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n df.to_pickle(os.path.join(ext_path, outfile))\n\n\ndef convert_comb(files):\n \"\"\"Takes a list of *.csv file inputs and creates a single data frames from each of them.\n\n Inputs: Input *.csv files to be combined into a single data frame\n Outputs: A data frame for each input and a single output combined data frame\n \"\"\"\n dfs = list()\n for file in files:\n ext_path = os.path.join(path, '1_DataFrames')\n df = pd.read_pickle(os.path.join(ext_path, file))\n dfs.append(df)\n new_df = merge_df(dfs)\n outfile = path_split[1] + '_' + path_split[3] + '_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n new_df.to_pickle(os.path.join(ext_path, outfile))\n return new_df\n\n\ndef sort_df(df, field='tweet_time'):\n \"\"\" Takes a Pandas data frame of Twitter data and sorts by a specified field to prep for data frame parsing steps. It\n also adds a column called \"unique_id_ida\" with formatted ID numbers for each tweet.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Field in data frame to be sorted (OPTIONAL: tweet_time, aka sort by date, is the default)\n Outputs: *.pkl file in the input directory with \"sorted\" label sorted, containing additional column \"unique_id_ida\"\n with 7-digit ID numbers beginning with 0000000 (i.e. 1234567, 0000134)\n \"\"\"\n if field == 'tweet_time':\n df['tweet_time'] = pd.to_datetime(df.tweet_time)\n df = df.sort_values(by=field)\n in_list = list()\n for i in range(0, len(df.index)):\n i = str(i)\n while len(i) != 7:\n i = '0' + i\n in_list.append(i)\n df['unique_id_ida'] = np.array(in_list)\n outfile = path_split[1] + '_' + path_split[3] + '_sorted_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n df.to_pickle(os.path.join(ext_path, outfile))\n return df\n\n\ndef split_df(df, num=30):\n \"\"\"Takes an input data frame and creates inventories for that data frame, sorted by date. First automatically calls\n the sort function to sort the data frame by the default (date).\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Number of inventories to split into. Users discresction depending on size of the data set.\n Outputs: Directory of inventories containing Tweet content and metadata. These inventories are divided in such a\n way to keep them between 130 and 160 MB, labelled with alphabetical characters to order them. They are\n sorted by date with the ranges of dates in each inventory in the file name\n (i.e. 
AA_Twitter10M_090509_130214.csv)\n \"\"\"\n df = sort_df(df)\n alphabets = string.ascii_lowercase\n a_list = list()\n for i in alphabets:\n for j in alphabets:\n a_list.append(i.upper() + j.upper())\n df_split = np.array_split(df, num)\n subpath = os.path.join(path, '2_Inventories')\n alpha_index = 0\n last_i = len(df_split) - 1\n for item in df_split:\n df_sub = pd.concat([item.head(1), item.tail(1)])\n date_bounds = pd.Series(df_sub['tweet_time'].tolist())\n date_bounds_format = date_bounds.dt.strftime('%Y%m%d').tolist()\n to_file = item[item.tweet_time.dt.strftime('%Y%m%d') != date_bounds\n [1].strftime('%Y%m%d')]\n if alpha_index == 0:\n comb_df = to_file\n elif alpha_index == last_i:\n comb_df = pd.concat([extra_rows, item], axis=0)\n else:\n comb_df = pd.concat([extra_rows, to_file], axis=0)\n prevdate = str(int(date_bounds_format[1]) - 1)\n filename = a_list[alpha_index] + '_' + path_split[1\n ] + '_' + path_split[3] + '_' + date_bounds_format[0][2:\n ] + '_' + prevdate[2:] + '.csv'\n print(filename)\n filepath = os.path.join(subpath, filename)\n comb_df.to_csv(filepath)\n extra_rows = item[item.tweet_time.dt.strftime('%Y%m%d') ==\n date_bounds[1].strftime('%Y%m%d')]\n alpha_index += 1\n\n\n<function token>\n\n\ndef strip_formatting(df, lim, lang='allLang'):\n \"\"\"Takes an imput data frame and removes emojis, punctuation, HTML entities like &, links, handles, and emojis.\n Then based on a user specified character limit, it removes the tweets that are below that limit and returns the sub-\n data frame.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Character limit for parsing after strip functionality implemented\n Language label user provides for file naming (if it is a data frame describing a particular language)\n Outputs: Pandas data frame with subset of tweets that satisfied the character limit after removind entities\n of interest\n \"\"\"\n tweets = df['tweet_text'].to_list()\n edit_tweets = list()\n include = list()\n for tweet in tweets:\n strip_tweet = strip_accounts(remove_punctuation(strip_html_entities\n (strip_links(strip_emoji(tweet)))))\n edit_tweets.append(strip_tweet)\n if is_length(strip_tweet, lim):\n include.append('1')\n else:\n include.append('0')\n df['stripped_tweet'] = edit_tweets\n df['tweet_length'] = df['tweet_text'].str.len()\n df['include_topic_model'] = include\n df['stripped_tweet_length'] = df['include_topic_model'].str.len()\n sub_df = df.loc[df['include_topic_model'] == '1']\n outfile = path_split[1] + '_' + path_split[3\n ] + '_sorted_strip_' + lang + '_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n sub_df.to_pickle(os.path.join(ext_path, outfile))\n return sub_df\n\n\ndef extract_content(df, label='All_Languages'):\n \"\"\"Takes an input data frame and extracts the individual Tweets and places it in chronological directories\n incremented by intervals based on a month.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Language label user provides for file naming (if it is a data frame describing a particular language)\n Outputs: Directories of binned tweets by month. Each tweet is in its own text file with the stripped tweet content\n only in the file. 
Each file is named accordingly like the following example:\n \"\"\"\n date_bounds = pd.Series(df['tweet_time'].tolist())\n date_bounds_ymd = date_bounds.dt.strftime('%Y%m%d').tolist()\n date_bounds_hms = date_bounds.dt.strftime('%H%M').tolist()\n content = pd.Series(df['stripped_tweet'].tolist())\n unid = pd.Series(df['unique_id_ida'].tolist())\n print('Total Files to process: ' + str(len(date_bounds_ymd)))\n parentdir = os.path.join(path, label)\n os.mkdir(parentdir)\n for i in range(0, len(date_bounds_ymd)):\n dir = date_bounds_ymd[i][:4] + '-' + date_bounds_ymd[i][4:6]\n fulldir = os.path.join(parentdir, dir)\n filename = str(unid[i]) + '_' + path_split[1] + '_' + path_split[3\n ] + '_' + date_bounds_ymd[i][2:] + '_' + date_bounds_hms[i][:4\n ] + '.txt'\n outpath = os.path.join(fulldir, filename)\n if os.path.exists(outpath):\n pass\n else:\n if os.path.isdir(fulldir):\n pass\n else:\n os.mkdir(fulldir)\n if int(i) % 10000 == 0:\n print('Files up to ' + str(i) + ' processed.')\n f = open(outpath, 'w', encoding='utf-8')\n f.write(content[i])\n f.close()\n\n\ndef generate_freq(df):\n \"\"\"Takes an input data frame and generates a histogram of number of tweets binned by month.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Input parameter called \"increment\", which determined by what time interval the tweets are organized\n Outputs: Histogram\n \"\"\"\n date_bounds = pd.Series(df['tweet_time'].tolist())\n date_bounds_ym = date_bounds.dt.strftime('%Y-%m').tolist()\n df['date_md'] = np.array(date_bounds_ym)\n sort = df.sort_values(by=['date_md'])\n frq = sort['date_md'].value_counts().to_dict()\n frq_df = sort['date_md'].value_counts()\n od = collections.OrderedDict(sorted(frq.items()))\n rf_dates = list()\n for item in list(od.keys()):\n date_rf = date_reformat(item)\n rf_dates.append(date_rf)\n data = {'Date': rf_dates, 'Freq': list(od.values())}\n graph_frame = pd.dataframe(data=data)\n frq_df.to_csv(os.path.join(path, 'tweet_freq_' + today + '.csv'))\n ax = graph_frame.plot.bar(x='Date', y='Freq', rot=45)\n plt.show()\n\n\ndef main():\n print('Start time: ' + str(datetime.now()))\n infile = 'Twitter_Russia_1906_sorted_strip_en_200929.pkl'\n inpath = os.path.join(path, '1_DataFrames')\n infilepath = os.path.join(inpath, infile)\n stripped_en = pd.read_pickle(infilepath)\n print(stripped_en.head()['unique_id_ida'])\n extract_content(stripped_en, 'English')\n print('End time: ' + str(datetime.now()))\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n\n\ndef convert_comb(files):\n \"\"\"Takes a list of *.csv file inputs and creates a single data frames from each of them.\n\n Inputs: Input *.csv files to be combined into a single data frame\n Outputs: A data frame for each input and a single output combined data frame\n \"\"\"\n dfs = list()\n for file in files:\n ext_path = os.path.join(path, '1_DataFrames')\n df = pd.read_pickle(os.path.join(ext_path, file))\n dfs.append(df)\n new_df = merge_df(dfs)\n outfile = path_split[1] + '_' + path_split[3] + '_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n new_df.to_pickle(os.path.join(ext_path, outfile))\n return new_df\n\n\ndef sort_df(df, field='tweet_time'):\n \"\"\" Takes a Pandas data frame of Twitter data and sorts by a specified field to prep for data frame parsing steps. It\n also adds a column called \"unique_id_ida\" with formatted ID numbers for each tweet.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Field in data frame to be sorted (OPTIONAL: tweet_time, aka sort by date, is the default)\n Outputs: *.pkl file in the input directory with \"sorted\" label sorted, containing additional column \"unique_id_ida\"\n with 7-digit ID numbers beginning with 0000000 (i.e. 1234567, 0000134)\n \"\"\"\n if field == 'tweet_time':\n df['tweet_time'] = pd.to_datetime(df.tweet_time)\n df = df.sort_values(by=field)\n in_list = list()\n for i in range(0, len(df.index)):\n i = str(i)\n while len(i) != 7:\n i = '0' + i\n in_list.append(i)\n df['unique_id_ida'] = np.array(in_list)\n outfile = path_split[1] + '_' + path_split[3] + '_sorted_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n df.to_pickle(os.path.join(ext_path, outfile))\n return df\n\n\ndef split_df(df, num=30):\n \"\"\"Takes an input data frame and creates inventories for that data frame, sorted by date. First automatically calls\n the sort function to sort the data frame by the default (date).\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Number of inventories to split into. Users discresction depending on size of the data set.\n Outputs: Directory of inventories containing Tweet content and metadata. These inventories are divided in such a\n way to keep them between 130 and 160 MB, labelled with alphabetical characters to order them. They are\n sorted by date with the ranges of dates in each inventory in the file name\n (i.e. 
AA_Twitter10M_090509_130214.csv)\n \"\"\"\n df = sort_df(df)\n alphabets = string.ascii_lowercase\n a_list = list()\n for i in alphabets:\n for j in alphabets:\n a_list.append(i.upper() + j.upper())\n df_split = np.array_split(df, num)\n subpath = os.path.join(path, '2_Inventories')\n alpha_index = 0\n last_i = len(df_split) - 1\n for item in df_split:\n df_sub = pd.concat([item.head(1), item.tail(1)])\n date_bounds = pd.Series(df_sub['tweet_time'].tolist())\n date_bounds_format = date_bounds.dt.strftime('%Y%m%d').tolist()\n to_file = item[item.tweet_time.dt.strftime('%Y%m%d') != date_bounds\n [1].strftime('%Y%m%d')]\n if alpha_index == 0:\n comb_df = to_file\n elif alpha_index == last_i:\n comb_df = pd.concat([extra_rows, item], axis=0)\n else:\n comb_df = pd.concat([extra_rows, to_file], axis=0)\n prevdate = str(int(date_bounds_format[1]) - 1)\n filename = a_list[alpha_index] + '_' + path_split[1\n ] + '_' + path_split[3] + '_' + date_bounds_format[0][2:\n ] + '_' + prevdate[2:] + '.csv'\n print(filename)\n filepath = os.path.join(subpath, filename)\n comb_df.to_csv(filepath)\n extra_rows = item[item.tweet_time.dt.strftime('%Y%m%d') ==\n date_bounds[1].strftime('%Y%m%d')]\n alpha_index += 1\n\n\n<function token>\n\n\ndef strip_formatting(df, lim, lang='allLang'):\n \"\"\"Takes an imput data frame and removes emojis, punctuation, HTML entities like &, links, handles, and emojis.\n Then based on a user specified character limit, it removes the tweets that are below that limit and returns the sub-\n data frame.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Character limit for parsing after strip functionality implemented\n Language label user provides for file naming (if it is a data frame describing a particular language)\n Outputs: Pandas data frame with subset of tweets that satisfied the character limit after removind entities\n of interest\n \"\"\"\n tweets = df['tweet_text'].to_list()\n edit_tweets = list()\n include = list()\n for tweet in tweets:\n strip_tweet = strip_accounts(remove_punctuation(strip_html_entities\n (strip_links(strip_emoji(tweet)))))\n edit_tweets.append(strip_tweet)\n if is_length(strip_tweet, lim):\n include.append('1')\n else:\n include.append('0')\n df['stripped_tweet'] = edit_tweets\n df['tweet_length'] = df['tweet_text'].str.len()\n df['include_topic_model'] = include\n df['stripped_tweet_length'] = df['include_topic_model'].str.len()\n sub_df = df.loc[df['include_topic_model'] == '1']\n outfile = path_split[1] + '_' + path_split[3\n ] + '_sorted_strip_' + lang + '_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n sub_df.to_pickle(os.path.join(ext_path, outfile))\n return sub_df\n\n\ndef extract_content(df, label='All_Languages'):\n \"\"\"Takes an input data frame and extracts the individual Tweets and places it in chronological directories\n incremented by intervals based on a month.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Language label user provides for file naming (if it is a data frame describing a particular language)\n Outputs: Directories of binned tweets by month. Each tweet is in its own text file with the stripped tweet content\n only in the file. 
Each file is named accordingly like the following example:\n \"\"\"\n date_bounds = pd.Series(df['tweet_time'].tolist())\n date_bounds_ymd = date_bounds.dt.strftime('%Y%m%d').tolist()\n date_bounds_hms = date_bounds.dt.strftime('%H%M').tolist()\n content = pd.Series(df['stripped_tweet'].tolist())\n unid = pd.Series(df['unique_id_ida'].tolist())\n print('Total Files to process: ' + str(len(date_bounds_ymd)))\n parentdir = os.path.join(path, label)\n os.mkdir(parentdir)\n for i in range(0, len(date_bounds_ymd)):\n dir = date_bounds_ymd[i][:4] + '-' + date_bounds_ymd[i][4:6]\n fulldir = os.path.join(parentdir, dir)\n filename = str(unid[i]) + '_' + path_split[1] + '_' + path_split[3\n ] + '_' + date_bounds_ymd[i][2:] + '_' + date_bounds_hms[i][:4\n ] + '.txt'\n outpath = os.path.join(fulldir, filename)\n if os.path.exists(outpath):\n pass\n else:\n if os.path.isdir(fulldir):\n pass\n else:\n os.mkdir(fulldir)\n if int(i) % 10000 == 0:\n print('Files up to ' + str(i) + ' processed.')\n f = open(outpath, 'w', encoding='utf-8')\n f.write(content[i])\n f.close()\n\n\ndef generate_freq(df):\n \"\"\"Takes an input data frame and generates a histogram of number of tweets binned by month.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Input parameter called \"increment\", which determined by what time interval the tweets are organized\n Outputs: Histogram\n \"\"\"\n date_bounds = pd.Series(df['tweet_time'].tolist())\n date_bounds_ym = date_bounds.dt.strftime('%Y-%m').tolist()\n df['date_md'] = np.array(date_bounds_ym)\n sort = df.sort_values(by=['date_md'])\n frq = sort['date_md'].value_counts().to_dict()\n frq_df = sort['date_md'].value_counts()\n od = collections.OrderedDict(sorted(frq.items()))\n rf_dates = list()\n for item in list(od.keys()):\n date_rf = date_reformat(item)\n rf_dates.append(date_rf)\n data = {'Date': rf_dates, 'Freq': list(od.values())}\n graph_frame = pd.dataframe(data=data)\n frq_df.to_csv(os.path.join(path, 'tweet_freq_' + today + '.csv'))\n ax = graph_frame.plot.bar(x='Date', y='Freq', rot=45)\n plt.show()\n\n\ndef main():\n print('Start time: ' + str(datetime.now()))\n infile = 'Twitter_Russia_1906_sorted_strip_en_200929.pkl'\n inpath = os.path.join(path, '1_DataFrames')\n infilepath = os.path.join(inpath, infile)\n stripped_en = pd.read_pickle(infilepath)\n print(stripped_en.head()['unique_id_ida'])\n extract_content(stripped_en, 'English')\n print('End time: ' + str(datetime.now()))\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n\n\ndef convert_comb(files):\n \"\"\"Takes a list of *.csv file inputs and creates a single data frames from each of them.\n\n Inputs: Input *.csv files to be combined into a single data frame\n Outputs: A data frame for each input and a single output combined data frame\n \"\"\"\n dfs = list()\n for file in files:\n ext_path = os.path.join(path, '1_DataFrames')\n df = pd.read_pickle(os.path.join(ext_path, file))\n dfs.append(df)\n new_df = merge_df(dfs)\n outfile = path_split[1] + '_' + path_split[3] + '_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n new_df.to_pickle(os.path.join(ext_path, outfile))\n return new_df\n\n\ndef sort_df(df, field='tweet_time'):\n \"\"\" Takes a Pandas data frame of Twitter data and sorts by a specified field to prep for data frame parsing steps. It\n also adds a column called \"unique_id_ida\" with formatted ID numbers for each tweet.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Field in data frame to be sorted (OPTIONAL: tweet_time, aka sort by date, is the default)\n Outputs: *.pkl file in the input directory with \"sorted\" label sorted, containing additional column \"unique_id_ida\"\n with 7-digit ID numbers beginning with 0000000 (i.e. 1234567, 0000134)\n \"\"\"\n if field == 'tweet_time':\n df['tweet_time'] = pd.to_datetime(df.tweet_time)\n df = df.sort_values(by=field)\n in_list = list()\n for i in range(0, len(df.index)):\n i = str(i)\n while len(i) != 7:\n i = '0' + i\n in_list.append(i)\n df['unique_id_ida'] = np.array(in_list)\n outfile = path_split[1] + '_' + path_split[3] + '_sorted_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n df.to_pickle(os.path.join(ext_path, outfile))\n return df\n\n\ndef split_df(df, num=30):\n \"\"\"Takes an input data frame and creates inventories for that data frame, sorted by date. First automatically calls\n the sort function to sort the data frame by the default (date).\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Number of inventories to split into. Users discresction depending on size of the data set.\n Outputs: Directory of inventories containing Tweet content and metadata. These inventories are divided in such a\n way to keep them between 130 and 160 MB, labelled with alphabetical characters to order them. They are\n sorted by date with the ranges of dates in each inventory in the file name\n (i.e. 
AA_Twitter10M_090509_130214.csv)\n \"\"\"\n df = sort_df(df)\n alphabets = string.ascii_lowercase\n a_list = list()\n for i in alphabets:\n for j in alphabets:\n a_list.append(i.upper() + j.upper())\n df_split = np.array_split(df, num)\n subpath = os.path.join(path, '2_Inventories')\n alpha_index = 0\n last_i = len(df_split) - 1\n for item in df_split:\n df_sub = pd.concat([item.head(1), item.tail(1)])\n date_bounds = pd.Series(df_sub['tweet_time'].tolist())\n date_bounds_format = date_bounds.dt.strftime('%Y%m%d').tolist()\n to_file = item[item.tweet_time.dt.strftime('%Y%m%d') != date_bounds\n [1].strftime('%Y%m%d')]\n if alpha_index == 0:\n comb_df = to_file\n elif alpha_index == last_i:\n comb_df = pd.concat([extra_rows, item], axis=0)\n else:\n comb_df = pd.concat([extra_rows, to_file], axis=0)\n prevdate = str(int(date_bounds_format[1]) - 1)\n filename = a_list[alpha_index] + '_' + path_split[1\n ] + '_' + path_split[3] + '_' + date_bounds_format[0][2:\n ] + '_' + prevdate[2:] + '.csv'\n print(filename)\n filepath = os.path.join(subpath, filename)\n comb_df.to_csv(filepath)\n extra_rows = item[item.tweet_time.dt.strftime('%Y%m%d') ==\n date_bounds[1].strftime('%Y%m%d')]\n alpha_index += 1\n\n\n<function token>\n\n\ndef strip_formatting(df, lim, lang='allLang'):\n \"\"\"Takes an imput data frame and removes emojis, punctuation, HTML entities like &, links, handles, and emojis.\n Then based on a user specified character limit, it removes the tweets that are below that limit and returns the sub-\n data frame.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Character limit for parsing after strip functionality implemented\n Language label user provides for file naming (if it is a data frame describing a particular language)\n Outputs: Pandas data frame with subset of tweets that satisfied the character limit after removind entities\n of interest\n \"\"\"\n tweets = df['tweet_text'].to_list()\n edit_tweets = list()\n include = list()\n for tweet in tweets:\n strip_tweet = strip_accounts(remove_punctuation(strip_html_entities\n (strip_links(strip_emoji(tweet)))))\n edit_tweets.append(strip_tweet)\n if is_length(strip_tweet, lim):\n include.append('1')\n else:\n include.append('0')\n df['stripped_tweet'] = edit_tweets\n df['tweet_length'] = df['tweet_text'].str.len()\n df['include_topic_model'] = include\n df['stripped_tweet_length'] = df['include_topic_model'].str.len()\n sub_df = df.loc[df['include_topic_model'] == '1']\n outfile = path_split[1] + '_' + path_split[3\n ] + '_sorted_strip_' + lang + '_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n sub_df.to_pickle(os.path.join(ext_path, outfile))\n return sub_df\n\n\ndef extract_content(df, label='All_Languages'):\n \"\"\"Takes an input data frame and extracts the individual Tweets and places it in chronological directories\n incremented by intervals based on a month.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Language label user provides for file naming (if it is a data frame describing a particular language)\n Outputs: Directories of binned tweets by month. Each tweet is in its own text file with the stripped tweet content\n only in the file. 
Each file is named accordingly like the following example:\n \"\"\"\n date_bounds = pd.Series(df['tweet_time'].tolist())\n date_bounds_ymd = date_bounds.dt.strftime('%Y%m%d').tolist()\n date_bounds_hms = date_bounds.dt.strftime('%H%M').tolist()\n content = pd.Series(df['stripped_tweet'].tolist())\n unid = pd.Series(df['unique_id_ida'].tolist())\n print('Total Files to process: ' + str(len(date_bounds_ymd)))\n parentdir = os.path.join(path, label)\n os.mkdir(parentdir)\n for i in range(0, len(date_bounds_ymd)):\n dir = date_bounds_ymd[i][:4] + '-' + date_bounds_ymd[i][4:6]\n fulldir = os.path.join(parentdir, dir)\n filename = str(unid[i]) + '_' + path_split[1] + '_' + path_split[3\n ] + '_' + date_bounds_ymd[i][2:] + '_' + date_bounds_hms[i][:4\n ] + '.txt'\n outpath = os.path.join(fulldir, filename)\n if os.path.exists(outpath):\n pass\n else:\n if os.path.isdir(fulldir):\n pass\n else:\n os.mkdir(fulldir)\n if int(i) % 10000 == 0:\n print('Files up to ' + str(i) + ' processed.')\n f = open(outpath, 'w', encoding='utf-8')\n f.write(content[i])\n f.close()\n\n\ndef generate_freq(df):\n \"\"\"Takes an input data frame and generates a histogram of number of tweets binned by month.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Input parameter called \"increment\", which determined by what time interval the tweets are organized\n Outputs: Histogram\n \"\"\"\n date_bounds = pd.Series(df['tweet_time'].tolist())\n date_bounds_ym = date_bounds.dt.strftime('%Y-%m').tolist()\n df['date_md'] = np.array(date_bounds_ym)\n sort = df.sort_values(by=['date_md'])\n frq = sort['date_md'].value_counts().to_dict()\n frq_df = sort['date_md'].value_counts()\n od = collections.OrderedDict(sorted(frq.items()))\n rf_dates = list()\n for item in list(od.keys()):\n date_rf = date_reformat(item)\n rf_dates.append(date_rf)\n data = {'Date': rf_dates, 'Freq': list(od.values())}\n graph_frame = pd.dataframe(data=data)\n frq_df.to_csv(os.path.join(path, 'tweet_freq_' + today + '.csv'))\n ax = graph_frame.plot.bar(x='Date', y='Freq', rot=45)\n plt.show()\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n\n\ndef convert_comb(files):\n \"\"\"Takes a list of *.csv file inputs and creates a single data frames from each of them.\n\n Inputs: Input *.csv files to be combined into a single data frame\n Outputs: A data frame for each input and a single output combined data frame\n \"\"\"\n dfs = list()\n for file in files:\n ext_path = os.path.join(path, '1_DataFrames')\n df = pd.read_pickle(os.path.join(ext_path, file))\n dfs.append(df)\n new_df = merge_df(dfs)\n outfile = path_split[1] + '_' + path_split[3] + '_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n new_df.to_pickle(os.path.join(ext_path, outfile))\n return new_df\n\n\ndef sort_df(df, field='tweet_time'):\n \"\"\" Takes a Pandas data frame of Twitter data and sorts by a specified field to prep for data frame parsing steps. It\n also adds a column called \"unique_id_ida\" with formatted ID numbers for each tweet.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Field in data frame to be sorted (OPTIONAL: tweet_time, aka sort by date, is the default)\n Outputs: *.pkl file in the input directory with \"sorted\" label sorted, containing additional column \"unique_id_ida\"\n with 7-digit ID numbers beginning with 0000000 (i.e. 1234567, 0000134)\n \"\"\"\n if field == 'tweet_time':\n df['tweet_time'] = pd.to_datetime(df.tweet_time)\n df = df.sort_values(by=field)\n in_list = list()\n for i in range(0, len(df.index)):\n i = str(i)\n while len(i) != 7:\n i = '0' + i\n in_list.append(i)\n df['unique_id_ida'] = np.array(in_list)\n outfile = path_split[1] + '_' + path_split[3] + '_sorted_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n df.to_pickle(os.path.join(ext_path, outfile))\n return df\n\n\ndef split_df(df, num=30):\n \"\"\"Takes an input data frame and creates inventories for that data frame, sorted by date. First automatically calls\n the sort function to sort the data frame by the default (date).\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Number of inventories to split into. Users discresction depending on size of the data set.\n Outputs: Directory of inventories containing Tweet content and metadata. These inventories are divided in such a\n way to keep them between 130 and 160 MB, labelled with alphabetical characters to order them. They are\n sorted by date with the ranges of dates in each inventory in the file name\n (i.e. 
AA_Twitter10M_090509_130214.csv)\n \"\"\"\n df = sort_df(df)\n alphabets = string.ascii_lowercase\n a_list = list()\n for i in alphabets:\n for j in alphabets:\n a_list.append(i.upper() + j.upper())\n df_split = np.array_split(df, num)\n subpath = os.path.join(path, '2_Inventories')\n alpha_index = 0\n last_i = len(df_split) - 1\n for item in df_split:\n df_sub = pd.concat([item.head(1), item.tail(1)])\n date_bounds = pd.Series(df_sub['tweet_time'].tolist())\n date_bounds_format = date_bounds.dt.strftime('%Y%m%d').tolist()\n to_file = item[item.tweet_time.dt.strftime('%Y%m%d') != date_bounds\n [1].strftime('%Y%m%d')]\n if alpha_index == 0:\n comb_df = to_file\n elif alpha_index == last_i:\n comb_df = pd.concat([extra_rows, item], axis=0)\n else:\n comb_df = pd.concat([extra_rows, to_file], axis=0)\n prevdate = str(int(date_bounds_format[1]) - 1)\n filename = a_list[alpha_index] + '_' + path_split[1\n ] + '_' + path_split[3] + '_' + date_bounds_format[0][2:\n ] + '_' + prevdate[2:] + '.csv'\n print(filename)\n filepath = os.path.join(subpath, filename)\n comb_df.to_csv(filepath)\n extra_rows = item[item.tweet_time.dt.strftime('%Y%m%d') ==\n date_bounds[1].strftime('%Y%m%d')]\n alpha_index += 1\n\n\n<function token>\n\n\ndef strip_formatting(df, lim, lang='allLang'):\n \"\"\"Takes an imput data frame and removes emojis, punctuation, HTML entities like &, links, handles, and emojis.\n Then based on a user specified character limit, it removes the tweets that are below that limit and returns the sub-\n data frame.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Character limit for parsing after strip functionality implemented\n Language label user provides for file naming (if it is a data frame describing a particular language)\n Outputs: Pandas data frame with subset of tweets that satisfied the character limit after removind entities\n of interest\n \"\"\"\n tweets = df['tweet_text'].to_list()\n edit_tweets = list()\n include = list()\n for tweet in tweets:\n strip_tweet = strip_accounts(remove_punctuation(strip_html_entities\n (strip_links(strip_emoji(tweet)))))\n edit_tweets.append(strip_tweet)\n if is_length(strip_tweet, lim):\n include.append('1')\n else:\n include.append('0')\n df['stripped_tweet'] = edit_tweets\n df['tweet_length'] = df['tweet_text'].str.len()\n df['include_topic_model'] = include\n df['stripped_tweet_length'] = df['include_topic_model'].str.len()\n sub_df = df.loc[df['include_topic_model'] == '1']\n outfile = path_split[1] + '_' + path_split[3\n ] + '_sorted_strip_' + lang + '_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n sub_df.to_pickle(os.path.join(ext_path, outfile))\n return sub_df\n\n\n<function token>\n\n\ndef generate_freq(df):\n \"\"\"Takes an input data frame and generates a histogram of number of tweets binned by month.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Input parameter called \"increment\", which determined by what time interval the tweets are organized\n Outputs: Histogram\n \"\"\"\n date_bounds = pd.Series(df['tweet_time'].tolist())\n date_bounds_ym = date_bounds.dt.strftime('%Y-%m').tolist()\n df['date_md'] = np.array(date_bounds_ym)\n sort = df.sort_values(by=['date_md'])\n frq = sort['date_md'].value_counts().to_dict()\n frq_df = sort['date_md'].value_counts()\n od = collections.OrderedDict(sorted(frq.items()))\n rf_dates = list()\n for item in list(od.keys()):\n date_rf = date_reformat(item)\n rf_dates.append(date_rf)\n data = {'Date': rf_dates, 'Freq': list(od.values())}\n 
graph_frame = pd.dataframe(data=data)\n frq_df.to_csv(os.path.join(path, 'tweet_freq_' + today + '.csv'))\n ax = graph_frame.plot.bar(x='Date', y='Freq', rot=45)\n plt.show()\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef sort_df(df, field='tweet_time'):\n \"\"\" Takes a Pandas data frame of Twitter data and sorts by a specified field to prep for data frame parsing steps. It\n also adds a column called \"unique_id_ida\" with formatted ID numbers for each tweet.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Field in data frame to be sorted (OPTIONAL: tweet_time, aka sort by date, is the default)\n Outputs: *.pkl file in the input directory with \"sorted\" label sorted, containing additional column \"unique_id_ida\"\n with 7-digit ID numbers beginning with 0000000 (i.e. 1234567, 0000134)\n \"\"\"\n if field == 'tweet_time':\n df['tweet_time'] = pd.to_datetime(df.tweet_time)\n df = df.sort_values(by=field)\n in_list = list()\n for i in range(0, len(df.index)):\n i = str(i)\n while len(i) != 7:\n i = '0' + i\n in_list.append(i)\n df['unique_id_ida'] = np.array(in_list)\n outfile = path_split[1] + '_' + path_split[3] + '_sorted_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n df.to_pickle(os.path.join(ext_path, outfile))\n return df\n\n\ndef split_df(df, num=30):\n \"\"\"Takes an input data frame and creates inventories for that data frame, sorted by date. First automatically calls\n the sort function to sort the data frame by the default (date).\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Number of inventories to split into. Users discresction depending on size of the data set.\n Outputs: Directory of inventories containing Tweet content and metadata. These inventories are divided in such a\n way to keep them between 130 and 160 MB, labelled with alphabetical characters to order them. They are\n sorted by date with the ranges of dates in each inventory in the file name\n (i.e. 
AA_Twitter10M_090509_130214.csv)\n \"\"\"\n df = sort_df(df)\n alphabets = string.ascii_lowercase\n a_list = list()\n for i in alphabets:\n for j in alphabets:\n a_list.append(i.upper() + j.upper())\n df_split = np.array_split(df, num)\n subpath = os.path.join(path, '2_Inventories')\n alpha_index = 0\n last_i = len(df_split) - 1\n for item in df_split:\n df_sub = pd.concat([item.head(1), item.tail(1)])\n date_bounds = pd.Series(df_sub['tweet_time'].tolist())\n date_bounds_format = date_bounds.dt.strftime('%Y%m%d').tolist()\n to_file = item[item.tweet_time.dt.strftime('%Y%m%d') != date_bounds\n [1].strftime('%Y%m%d')]\n if alpha_index == 0:\n comb_df = to_file\n elif alpha_index == last_i:\n comb_df = pd.concat([extra_rows, item], axis=0)\n else:\n comb_df = pd.concat([extra_rows, to_file], axis=0)\n prevdate = str(int(date_bounds_format[1]) - 1)\n filename = a_list[alpha_index] + '_' + path_split[1\n ] + '_' + path_split[3] + '_' + date_bounds_format[0][2:\n ] + '_' + prevdate[2:] + '.csv'\n print(filename)\n filepath = os.path.join(subpath, filename)\n comb_df.to_csv(filepath)\n extra_rows = item[item.tweet_time.dt.strftime('%Y%m%d') ==\n date_bounds[1].strftime('%Y%m%d')]\n alpha_index += 1\n\n\n<function token>\n\n\ndef strip_formatting(df, lim, lang='allLang'):\n \"\"\"Takes an imput data frame and removes emojis, punctuation, HTML entities like &, links, handles, and emojis.\n Then based on a user specified character limit, it removes the tweets that are below that limit and returns the sub-\n data frame.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Character limit for parsing after strip functionality implemented\n Language label user provides for file naming (if it is a data frame describing a particular language)\n Outputs: Pandas data frame with subset of tweets that satisfied the character limit after removind entities\n of interest\n \"\"\"\n tweets = df['tweet_text'].to_list()\n edit_tweets = list()\n include = list()\n for tweet in tweets:\n strip_tweet = strip_accounts(remove_punctuation(strip_html_entities\n (strip_links(strip_emoji(tweet)))))\n edit_tweets.append(strip_tweet)\n if is_length(strip_tweet, lim):\n include.append('1')\n else:\n include.append('0')\n df['stripped_tweet'] = edit_tweets\n df['tweet_length'] = df['tweet_text'].str.len()\n df['include_topic_model'] = include\n df['stripped_tweet_length'] = df['include_topic_model'].str.len()\n sub_df = df.loc[df['include_topic_model'] == '1']\n outfile = path_split[1] + '_' + path_split[3\n ] + '_sorted_strip_' + lang + '_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n sub_df.to_pickle(os.path.join(ext_path, outfile))\n return sub_df\n\n\n<function token>\n\n\ndef generate_freq(df):\n \"\"\"Takes an input data frame and generates a histogram of number of tweets binned by month.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Input parameter called \"increment\", which determined by what time interval the tweets are organized\n Outputs: Histogram\n \"\"\"\n date_bounds = pd.Series(df['tweet_time'].tolist())\n date_bounds_ym = date_bounds.dt.strftime('%Y-%m').tolist()\n df['date_md'] = np.array(date_bounds_ym)\n sort = df.sort_values(by=['date_md'])\n frq = sort['date_md'].value_counts().to_dict()\n frq_df = sort['date_md'].value_counts()\n od = collections.OrderedDict(sorted(frq.items()))\n rf_dates = list()\n for item in list(od.keys()):\n date_rf = date_reformat(item)\n rf_dates.append(date_rf)\n data = {'Date': rf_dates, 'Freq': list(od.values())}\n 
graph_frame = pd.dataframe(data=data)\n frq_df.to_csv(os.path.join(path, 'tweet_freq_' + today + '.csv'))\n ax = graph_frame.plot.bar(x='Date', y='Freq', rot=45)\n plt.show()\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef sort_df(df, field='tweet_time'):\n \"\"\" Takes a Pandas data frame of Twitter data and sorts by a specified field to prep for data frame parsing steps. It\n also adds a column called \"unique_id_ida\" with formatted ID numbers for each tweet.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Field in data frame to be sorted (OPTIONAL: tweet_time, aka sort by date, is the default)\n Outputs: *.pkl file in the input directory with \"sorted\" label sorted, containing additional column \"unique_id_ida\"\n with 7-digit ID numbers beginning with 0000000 (i.e. 1234567, 0000134)\n \"\"\"\n if field == 'tweet_time':\n df['tweet_time'] = pd.to_datetime(df.tweet_time)\n df = df.sort_values(by=field)\n in_list = list()\n for i in range(0, len(df.index)):\n i = str(i)\n while len(i) != 7:\n i = '0' + i\n in_list.append(i)\n df['unique_id_ida'] = np.array(in_list)\n outfile = path_split[1] + '_' + path_split[3] + '_sorted_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n df.to_pickle(os.path.join(ext_path, outfile))\n return df\n\n\n<function token>\n<function token>\n\n\ndef strip_formatting(df, lim, lang='allLang'):\n \"\"\"Takes an imput data frame and removes emojis, punctuation, HTML entities like &, links, handles, and emojis.\n Then based on a user specified character limit, it removes the tweets that are below that limit and returns the sub-\n data frame.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Character limit for parsing after strip functionality implemented\n Language label user provides for file naming (if it is a data frame describing a particular language)\n Outputs: Pandas data frame with subset of tweets that satisfied the character limit after removind entities\n of interest\n \"\"\"\n tweets = df['tweet_text'].to_list()\n edit_tweets = list()\n include = list()\n for tweet in tweets:\n strip_tweet = strip_accounts(remove_punctuation(strip_html_entities\n (strip_links(strip_emoji(tweet)))))\n edit_tweets.append(strip_tweet)\n if is_length(strip_tweet, lim):\n include.append('1')\n else:\n include.append('0')\n df['stripped_tweet'] = edit_tweets\n df['tweet_length'] = df['tweet_text'].str.len()\n df['include_topic_model'] = include\n df['stripped_tweet_length'] = df['include_topic_model'].str.len()\n sub_df = df.loc[df['include_topic_model'] == '1']\n outfile = path_split[1] + '_' + path_split[3\n ] + '_sorted_strip_' + lang + '_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n sub_df.to_pickle(os.path.join(ext_path, outfile))\n return sub_df\n\n\n<function token>\n\n\ndef generate_freq(df):\n \"\"\"Takes an input data frame and generates a histogram of number of tweets binned by month.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Input parameter called \"increment\", which determined by what time interval the tweets are organized\n Outputs: Histogram\n \"\"\"\n date_bounds = pd.Series(df['tweet_time'].tolist())\n date_bounds_ym = date_bounds.dt.strftime('%Y-%m').tolist()\n df['date_md'] = np.array(date_bounds_ym)\n sort = df.sort_values(by=['date_md'])\n frq = sort['date_md'].value_counts().to_dict()\n frq_df = sort['date_md'].value_counts()\n od = collections.OrderedDict(sorted(frq.items()))\n rf_dates = list()\n for item in list(od.keys()):\n date_rf = date_reformat(item)\n rf_dates.append(date_rf)\n data = {'Date': rf_dates, 'Freq': list(od.values())}\n graph_frame = pd.dataframe(data=data)\n 
frq_df.to_csv(os.path.join(path, 'tweet_freq_' + today + '.csv'))\n ax = graph_frame.plot.bar(x='Date', y='Freq', rot=45)\n plt.show()\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef sort_df(df, field='tweet_time'):\n \"\"\" Takes a Pandas data frame of Twitter data and sorts by a specified field to prep for data frame parsing steps. It\n also adds a column called \"unique_id_ida\" with formatted ID numbers for each tweet.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Field in data frame to be sorted (OPTIONAL: tweet_time, aka sort by date, is the default)\n Outputs: *.pkl file in the input directory with \"sorted\" label sorted, containing additional column \"unique_id_ida\"\n with 7-digit ID numbers beginning with 0000000 (i.e. 1234567, 0000134)\n \"\"\"\n if field == 'tweet_time':\n df['tweet_time'] = pd.to_datetime(df.tweet_time)\n df = df.sort_values(by=field)\n in_list = list()\n for i in range(0, len(df.index)):\n i = str(i)\n while len(i) != 7:\n i = '0' + i\n in_list.append(i)\n df['unique_id_ida'] = np.array(in_list)\n outfile = path_split[1] + '_' + path_split[3] + '_sorted_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n df.to_pickle(os.path.join(ext_path, outfile))\n return df\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef generate_freq(df):\n \"\"\"Takes an input data frame and generates a histogram of number of tweets binned by month.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Input parameter called \"increment\", which determined by what time interval the tweets are organized\n Outputs: Histogram\n \"\"\"\n date_bounds = pd.Series(df['tweet_time'].tolist())\n date_bounds_ym = date_bounds.dt.strftime('%Y-%m').tolist()\n df['date_md'] = np.array(date_bounds_ym)\n sort = df.sort_values(by=['date_md'])\n frq = sort['date_md'].value_counts().to_dict()\n frq_df = sort['date_md'].value_counts()\n od = collections.OrderedDict(sorted(frq.items()))\n rf_dates = list()\n for item in list(od.keys()):\n date_rf = date_reformat(item)\n rf_dates.append(date_rf)\n data = {'Date': rf_dates, 'Freq': list(od.values())}\n graph_frame = pd.dataframe(data=data)\n frq_df.to_csv(os.path.join(path, 'tweet_freq_' + today + '.csv'))\n ax = graph_frame.plot.bar(x='Date', y='Freq', rot=45)\n plt.show()\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef sort_df(df, field='tweet_time'):\n \"\"\" Takes a Pandas data frame of Twitter data and sorts by a specified field to prep for data frame parsing steps. It\n also adds a column called \"unique_id_ida\" with formatted ID numbers for each tweet.\n\n Inputs: Pandas data frame imported from *.csv or *.pkl file\n Field in data frame to be sorted (OPTIONAL: tweet_time, aka sort by date, is the default)\n Outputs: *.pkl file in the input directory with \"sorted\" label sorted, containing additional column \"unique_id_ida\"\n with 7-digit ID numbers beginning with 0000000 (i.e. 1234567, 0000134)\n \"\"\"\n if field == 'tweet_time':\n df['tweet_time'] = pd.to_datetime(df.tweet_time)\n df = df.sort_values(by=field)\n in_list = list()\n for i in range(0, len(df.index)):\n i = str(i)\n while len(i) != 7:\n i = '0' + i\n in_list.append(i)\n df['unique_id_ida'] = np.array(in_list)\n outfile = path_split[1] + '_' + path_split[3] + '_sorted_' + today + '.pkl'\n ext_path = os.path.join(path, '1_DataFrames')\n df.to_pickle(os.path.join(ext_path, outfile))\n return df\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
98,624 |
8aae72d0986bf6d18395f8d5937a0e1f6b36fda6
|
from functools import reduce
import sys
sys.path.append('../Day5')
from ipu import IPU
sys.path.append('../Day15')
from visualizer import Visualizer
def isIntersection(grid, pos):
def positionIsIntersection(x,y):
return (x,y) in grid and grid[(x,y)] == '#'
return positionIsIntersection(pos[0] + 0, pos[1] - 1) and positionIsIntersection(pos[0] - 1, pos[1] + 0) and positionIsIntersection(pos[0] + 1, pos[1] + 0) and positionIsIntersection(pos[0] + 0, pos[1] + 1)
def getInitalGrid(memory):
ipu = IPU(list(initalMemory))
grid = {}
x = y = 0
robotPos = None
line = ''
while not ipu.hasHalted:
output,_ = ipu.runUntilOutput()
c = chr(output)
if c == '\n':
y += 1
x = 0
print(line)
line = ''
else:
if c == '^':
robotPos = (x,y)
line += c
grid[(x,y)] = c
x += 1
print(line)
return grid, robotPos
def a(grid):
intersections = [p for p in grid.keys() if grid[p] == '#' and isIntersection(grid, p)]
alignmentParameters = list(map(lambda pos: pos[0] * pos[1], intersections))
return sum(alignmentParameters)
def b(memory, visualizer):
# Manual solve ftw
# PATH: L10 R10 L10 L10 R10 R12 L12 L10 R10 L10 L10 R10 R12 L12 R12 L12 R6 R12 L12 R6 R10 R12 L12 R10 L10 L10 R10 R12 L12 R12 L12 R6
# ORDER: A B A B C C B A B C
# RULES:
# A: L10 R10 L10 L10
# B: R10 R12 L12
# C: R12 L12 R6
ipu = IPU(memory)
ipu.memory[0] = 2
def addAsAsciiInput(ascii):
for c in ascii:
ipu.input.appendleft(ord(c))
ipu.input.appendleft(10)
addAsAsciiInput('A,B,A,B,C,C,B,A,B,C')
addAsAsciiInput('L,10,R,10,L,10,L,10')
addAsAsciiInput('R,10,R,12,L,12')
addAsAsciiInput('R,12,L,12,R,6')
addAsAsciiInput('y')
x = y = 0
while not ipu.hasHalted:
output,_ = ipu.runUntilOutput()
if(output < 255):
c = chr(output)
if c == '\n':
y += 1
if x == 0:
y = 0
visualizer.update()
visualizer.processEvents()
x = 0
elif c in ['.', '#', '^', 'v', '<', '>']:
visualizer.updateGridPos(x, y, output, False)
x += 1
else:
return output
return -1
initalMemory = [int(x) for x in open('input.txt').readline().split(',')]
grid, playerPos = getInitalGrid(initalMemory)
print('A: %i' % a(grid))
visualizer = Visualizer((800,800), {
ord('#'): (200,200,200),
ord('^'): (0,0,255),
ord('>'): (0,0,255),
ord('v'): (0,0,255),
ord('<'): (0,0,255)
})
print('B: %i' % b(initalMemory, visualizer))
visualizer.waitTillEnd()
|
[
"from functools import reduce\nimport sys\nsys.path.append('../Day5')\nfrom ipu import IPU\n\nsys.path.append('../Day15')\nfrom visualizer import Visualizer\n\ndef isIntersection(grid, pos):\n def positionIsIntersection(x,y):\n return (x,y) in grid and grid[(x,y)] == '#'\n\n return positionIsIntersection(pos[0] + 0, pos[1] - 1) and positionIsIntersection(pos[0] - 1, pos[1] + 0) and positionIsIntersection(pos[0] + 1, pos[1] + 0) and positionIsIntersection(pos[0] + 0, pos[1] + 1)\n\ndef getInitalGrid(memory):\n ipu = IPU(list(initalMemory))\n\n grid = {}\n x = y = 0\n robotPos = None\n line = ''\n while not ipu.hasHalted:\n output,_ = ipu.runUntilOutput()\n c = chr(output)\n\n if c == '\\n':\n y += 1\n x = 0\n\n print(line)\n line = ''\n else:\n if c == '^':\n robotPos = (x,y)\n\n line += c\n grid[(x,y)] = c\n x += 1\n\n print(line)\n return grid, robotPos\n\ndef a(grid):\n intersections = [p for p in grid.keys() if grid[p] == '#' and isIntersection(grid, p)]\n alignmentParameters = list(map(lambda pos: pos[0] * pos[1], intersections))\n return sum(alignmentParameters)\n\ndef b(memory, visualizer):\n # Manual solve ftw\n # PATH: L10 R10 L10 L10 R10 R12 L12 L10 R10 L10 L10 R10 R12 L12 R12 L12 R6 R12 L12 R6 R10 R12 L12 R10 L10 L10 R10 R12 L12 R12 L12 R6\n # ORDER: A B A B C C B A B C\n # RULES:\n # A: L10 R10 L10 L10\n # B: R10 R12 L12\n # C: R12 L12 R6\n \n ipu = IPU(memory)\n ipu.memory[0] = 2\n\n def addAsAsciiInput(ascii):\n for c in ascii:\n ipu.input.appendleft(ord(c))\n\n ipu.input.appendleft(10)\n\n addAsAsciiInput('A,B,A,B,C,C,B,A,B,C')\n addAsAsciiInput('L,10,R,10,L,10,L,10')\n addAsAsciiInput('R,10,R,12,L,12')\n addAsAsciiInput('R,12,L,12,R,6')\n\n addAsAsciiInput('y')\n\n x = y = 0\n while not ipu.hasHalted:\n output,_ = ipu.runUntilOutput()\n\n if(output < 255):\n c = chr(output)\n \n if c == '\\n':\n y += 1\n\n if x == 0:\n y = 0\n visualizer.update()\n visualizer.processEvents()\n\n x = 0\n elif c in ['.', '#', '^', 'v', '<', '>']:\n visualizer.updateGridPos(x, y, output, False)\n x += 1\n \n else: \n return output\n\n return -1\n\n\n\ninitalMemory = [int(x) for x in open('input.txt').readline().split(',')]\ngrid, playerPos = getInitalGrid(initalMemory)\n\nprint('A: %i' % a(grid))\n\nvisualizer = Visualizer((800,800), {\n ord('#'): (200,200,200),\n ord('^'): (0,0,255),\n ord('>'): (0,0,255),\n ord('v'): (0,0,255),\n ord('<'): (0,0,255)\n })\n\nprint('B: %i' % b(initalMemory, visualizer))\n\nvisualizer.waitTillEnd()",
"from functools import reduce\nimport sys\nsys.path.append('../Day5')\nfrom ipu import IPU\nsys.path.append('../Day15')\nfrom visualizer import Visualizer\n\n\ndef isIntersection(grid, pos):\n\n def positionIsIntersection(x, y):\n return (x, y) in grid and grid[x, y] == '#'\n return positionIsIntersection(pos[0] + 0, pos[1] - 1\n ) and positionIsIntersection(pos[0] - 1, pos[1] + 0\n ) and positionIsIntersection(pos[0] + 1, pos[1] + 0\n ) and positionIsIntersection(pos[0] + 0, pos[1] + 1)\n\n\ndef getInitalGrid(memory):\n ipu = IPU(list(initalMemory))\n grid = {}\n x = y = 0\n robotPos = None\n line = ''\n while not ipu.hasHalted:\n output, _ = ipu.runUntilOutput()\n c = chr(output)\n if c == '\\n':\n y += 1\n x = 0\n print(line)\n line = ''\n else:\n if c == '^':\n robotPos = x, y\n line += c\n grid[x, y] = c\n x += 1\n print(line)\n return grid, robotPos\n\n\ndef a(grid):\n intersections = [p for p in grid.keys() if grid[p] == '#' and\n isIntersection(grid, p)]\n alignmentParameters = list(map(lambda pos: pos[0] * pos[1], intersections))\n return sum(alignmentParameters)\n\n\ndef b(memory, visualizer):\n ipu = IPU(memory)\n ipu.memory[0] = 2\n\n def addAsAsciiInput(ascii):\n for c in ascii:\n ipu.input.appendleft(ord(c))\n ipu.input.appendleft(10)\n addAsAsciiInput('A,B,A,B,C,C,B,A,B,C')\n addAsAsciiInput('L,10,R,10,L,10,L,10')\n addAsAsciiInput('R,10,R,12,L,12')\n addAsAsciiInput('R,12,L,12,R,6')\n addAsAsciiInput('y')\n x = y = 0\n while not ipu.hasHalted:\n output, _ = ipu.runUntilOutput()\n if output < 255:\n c = chr(output)\n if c == '\\n':\n y += 1\n if x == 0:\n y = 0\n visualizer.update()\n visualizer.processEvents()\n x = 0\n elif c in ['.', '#', '^', 'v', '<', '>']:\n visualizer.updateGridPos(x, y, output, False)\n x += 1\n else:\n return output\n return -1\n\n\ninitalMemory = [int(x) for x in open('input.txt').readline().split(',')]\ngrid, playerPos = getInitalGrid(initalMemory)\nprint('A: %i' % a(grid))\nvisualizer = Visualizer((800, 800), {ord('#'): (200, 200, 200), ord('^'): (\n 0, 0, 255), ord('>'): (0, 0, 255), ord('v'): (0, 0, 255), ord('<'): (0,\n 0, 255)})\nprint('B: %i' % b(initalMemory, visualizer))\nvisualizer.waitTillEnd()\n",
"<import token>\nsys.path.append('../Day5')\n<import token>\nsys.path.append('../Day15')\n<import token>\n\n\ndef isIntersection(grid, pos):\n\n def positionIsIntersection(x, y):\n return (x, y) in grid and grid[x, y] == '#'\n return positionIsIntersection(pos[0] + 0, pos[1] - 1\n ) and positionIsIntersection(pos[0] - 1, pos[1] + 0\n ) and positionIsIntersection(pos[0] + 1, pos[1] + 0\n ) and positionIsIntersection(pos[0] + 0, pos[1] + 1)\n\n\ndef getInitalGrid(memory):\n ipu = IPU(list(initalMemory))\n grid = {}\n x = y = 0\n robotPos = None\n line = ''\n while not ipu.hasHalted:\n output, _ = ipu.runUntilOutput()\n c = chr(output)\n if c == '\\n':\n y += 1\n x = 0\n print(line)\n line = ''\n else:\n if c == '^':\n robotPos = x, y\n line += c\n grid[x, y] = c\n x += 1\n print(line)\n return grid, robotPos\n\n\ndef a(grid):\n intersections = [p for p in grid.keys() if grid[p] == '#' and\n isIntersection(grid, p)]\n alignmentParameters = list(map(lambda pos: pos[0] * pos[1], intersections))\n return sum(alignmentParameters)\n\n\ndef b(memory, visualizer):\n ipu = IPU(memory)\n ipu.memory[0] = 2\n\n def addAsAsciiInput(ascii):\n for c in ascii:\n ipu.input.appendleft(ord(c))\n ipu.input.appendleft(10)\n addAsAsciiInput('A,B,A,B,C,C,B,A,B,C')\n addAsAsciiInput('L,10,R,10,L,10,L,10')\n addAsAsciiInput('R,10,R,12,L,12')\n addAsAsciiInput('R,12,L,12,R,6')\n addAsAsciiInput('y')\n x = y = 0\n while not ipu.hasHalted:\n output, _ = ipu.runUntilOutput()\n if output < 255:\n c = chr(output)\n if c == '\\n':\n y += 1\n if x == 0:\n y = 0\n visualizer.update()\n visualizer.processEvents()\n x = 0\n elif c in ['.', '#', '^', 'v', '<', '>']:\n visualizer.updateGridPos(x, y, output, False)\n x += 1\n else:\n return output\n return -1\n\n\ninitalMemory = [int(x) for x in open('input.txt').readline().split(',')]\ngrid, playerPos = getInitalGrid(initalMemory)\nprint('A: %i' % a(grid))\nvisualizer = Visualizer((800, 800), {ord('#'): (200, 200, 200), ord('^'): (\n 0, 0, 255), ord('>'): (0, 0, 255), ord('v'): (0, 0, 255), ord('<'): (0,\n 0, 255)})\nprint('B: %i' % b(initalMemory, visualizer))\nvisualizer.waitTillEnd()\n",
"<import token>\nsys.path.append('../Day5')\n<import token>\nsys.path.append('../Day15')\n<import token>\n\n\ndef isIntersection(grid, pos):\n\n def positionIsIntersection(x, y):\n return (x, y) in grid and grid[x, y] == '#'\n return positionIsIntersection(pos[0] + 0, pos[1] - 1\n ) and positionIsIntersection(pos[0] - 1, pos[1] + 0\n ) and positionIsIntersection(pos[0] + 1, pos[1] + 0\n ) and positionIsIntersection(pos[0] + 0, pos[1] + 1)\n\n\ndef getInitalGrid(memory):\n ipu = IPU(list(initalMemory))\n grid = {}\n x = y = 0\n robotPos = None\n line = ''\n while not ipu.hasHalted:\n output, _ = ipu.runUntilOutput()\n c = chr(output)\n if c == '\\n':\n y += 1\n x = 0\n print(line)\n line = ''\n else:\n if c == '^':\n robotPos = x, y\n line += c\n grid[x, y] = c\n x += 1\n print(line)\n return grid, robotPos\n\n\ndef a(grid):\n intersections = [p for p in grid.keys() if grid[p] == '#' and\n isIntersection(grid, p)]\n alignmentParameters = list(map(lambda pos: pos[0] * pos[1], intersections))\n return sum(alignmentParameters)\n\n\ndef b(memory, visualizer):\n ipu = IPU(memory)\n ipu.memory[0] = 2\n\n def addAsAsciiInput(ascii):\n for c in ascii:\n ipu.input.appendleft(ord(c))\n ipu.input.appendleft(10)\n addAsAsciiInput('A,B,A,B,C,C,B,A,B,C')\n addAsAsciiInput('L,10,R,10,L,10,L,10')\n addAsAsciiInput('R,10,R,12,L,12')\n addAsAsciiInput('R,12,L,12,R,6')\n addAsAsciiInput('y')\n x = y = 0\n while not ipu.hasHalted:\n output, _ = ipu.runUntilOutput()\n if output < 255:\n c = chr(output)\n if c == '\\n':\n y += 1\n if x == 0:\n y = 0\n visualizer.update()\n visualizer.processEvents()\n x = 0\n elif c in ['.', '#', '^', 'v', '<', '>']:\n visualizer.updateGridPos(x, y, output, False)\n x += 1\n else:\n return output\n return -1\n\n\n<assignment token>\nprint('A: %i' % a(grid))\n<assignment token>\nprint('B: %i' % b(initalMemory, visualizer))\nvisualizer.waitTillEnd()\n",
"<import token>\n<code token>\n<import token>\n<code token>\n<import token>\n\n\ndef isIntersection(grid, pos):\n\n def positionIsIntersection(x, y):\n return (x, y) in grid and grid[x, y] == '#'\n return positionIsIntersection(pos[0] + 0, pos[1] - 1\n ) and positionIsIntersection(pos[0] - 1, pos[1] + 0\n ) and positionIsIntersection(pos[0] + 1, pos[1] + 0\n ) and positionIsIntersection(pos[0] + 0, pos[1] + 1)\n\n\ndef getInitalGrid(memory):\n ipu = IPU(list(initalMemory))\n grid = {}\n x = y = 0\n robotPos = None\n line = ''\n while not ipu.hasHalted:\n output, _ = ipu.runUntilOutput()\n c = chr(output)\n if c == '\\n':\n y += 1\n x = 0\n print(line)\n line = ''\n else:\n if c == '^':\n robotPos = x, y\n line += c\n grid[x, y] = c\n x += 1\n print(line)\n return grid, robotPos\n\n\ndef a(grid):\n intersections = [p for p in grid.keys() if grid[p] == '#' and\n isIntersection(grid, p)]\n alignmentParameters = list(map(lambda pos: pos[0] * pos[1], intersections))\n return sum(alignmentParameters)\n\n\ndef b(memory, visualizer):\n ipu = IPU(memory)\n ipu.memory[0] = 2\n\n def addAsAsciiInput(ascii):\n for c in ascii:\n ipu.input.appendleft(ord(c))\n ipu.input.appendleft(10)\n addAsAsciiInput('A,B,A,B,C,C,B,A,B,C')\n addAsAsciiInput('L,10,R,10,L,10,L,10')\n addAsAsciiInput('R,10,R,12,L,12')\n addAsAsciiInput('R,12,L,12,R,6')\n addAsAsciiInput('y')\n x = y = 0\n while not ipu.hasHalted:\n output, _ = ipu.runUntilOutput()\n if output < 255:\n c = chr(output)\n if c == '\\n':\n y += 1\n if x == 0:\n y = 0\n visualizer.update()\n visualizer.processEvents()\n x = 0\n elif c in ['.', '#', '^', 'v', '<', '>']:\n visualizer.updateGridPos(x, y, output, False)\n x += 1\n else:\n return output\n return -1\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<import token>\n<code token>\n<import token>\n\n\ndef isIntersection(grid, pos):\n\n def positionIsIntersection(x, y):\n return (x, y) in grid and grid[x, y] == '#'\n return positionIsIntersection(pos[0] + 0, pos[1] - 1\n ) and positionIsIntersection(pos[0] - 1, pos[1] + 0\n ) and positionIsIntersection(pos[0] + 1, pos[1] + 0\n ) and positionIsIntersection(pos[0] + 0, pos[1] + 1)\n\n\ndef getInitalGrid(memory):\n ipu = IPU(list(initalMemory))\n grid = {}\n x = y = 0\n robotPos = None\n line = ''\n while not ipu.hasHalted:\n output, _ = ipu.runUntilOutput()\n c = chr(output)\n if c == '\\n':\n y += 1\n x = 0\n print(line)\n line = ''\n else:\n if c == '^':\n robotPos = x, y\n line += c\n grid[x, y] = c\n x += 1\n print(line)\n return grid, robotPos\n\n\n<function token>\n\n\ndef b(memory, visualizer):\n ipu = IPU(memory)\n ipu.memory[0] = 2\n\n def addAsAsciiInput(ascii):\n for c in ascii:\n ipu.input.appendleft(ord(c))\n ipu.input.appendleft(10)\n addAsAsciiInput('A,B,A,B,C,C,B,A,B,C')\n addAsAsciiInput('L,10,R,10,L,10,L,10')\n addAsAsciiInput('R,10,R,12,L,12')\n addAsAsciiInput('R,12,L,12,R,6')\n addAsAsciiInput('y')\n x = y = 0\n while not ipu.hasHalted:\n output, _ = ipu.runUntilOutput()\n if output < 255:\n c = chr(output)\n if c == '\\n':\n y += 1\n if x == 0:\n y = 0\n visualizer.update()\n visualizer.processEvents()\n x = 0\n elif c in ['.', '#', '^', 'v', '<', '>']:\n visualizer.updateGridPos(x, y, output, False)\n x += 1\n else:\n return output\n return -1\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<import token>\n<code token>\n<import token>\n\n\ndef isIntersection(grid, pos):\n\n def positionIsIntersection(x, y):\n return (x, y) in grid and grid[x, y] == '#'\n return positionIsIntersection(pos[0] + 0, pos[1] - 1\n ) and positionIsIntersection(pos[0] - 1, pos[1] + 0\n ) and positionIsIntersection(pos[0] + 1, pos[1] + 0\n ) and positionIsIntersection(pos[0] + 0, pos[1] + 1)\n\n\n<function token>\n<function token>\n\n\ndef b(memory, visualizer):\n ipu = IPU(memory)\n ipu.memory[0] = 2\n\n def addAsAsciiInput(ascii):\n for c in ascii:\n ipu.input.appendleft(ord(c))\n ipu.input.appendleft(10)\n addAsAsciiInput('A,B,A,B,C,C,B,A,B,C')\n addAsAsciiInput('L,10,R,10,L,10,L,10')\n addAsAsciiInput('R,10,R,12,L,12')\n addAsAsciiInput('R,12,L,12,R,6')\n addAsAsciiInput('y')\n x = y = 0\n while not ipu.hasHalted:\n output, _ = ipu.runUntilOutput()\n if output < 255:\n c = chr(output)\n if c == '\\n':\n y += 1\n if x == 0:\n y = 0\n visualizer.update()\n visualizer.processEvents()\n x = 0\n elif c in ['.', '#', '^', 'v', '<', '>']:\n visualizer.updateGridPos(x, y, output, False)\n x += 1\n else:\n return output\n return -1\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<import token>\n<code token>\n<import token>\n\n\ndef isIntersection(grid, pos):\n\n def positionIsIntersection(x, y):\n return (x, y) in grid and grid[x, y] == '#'\n return positionIsIntersection(pos[0] + 0, pos[1] - 1\n ) and positionIsIntersection(pos[0] - 1, pos[1] + 0\n ) and positionIsIntersection(pos[0] + 1, pos[1] + 0\n ) and positionIsIntersection(pos[0] + 0, pos[1] + 1)\n\n\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<import token>\n<code token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,625 |
fd53b1e160bc1aa5c8f3f04ab78d5e0082247c5f
|
import sys
resultline = ""
for line in sys.stdin:
split = line.split(",")
chr = split[0].split(" ")[0]
if split[2].isdigit():
resultline = chr +"\t" +split[2] +"\t" +str((int(split[2]) + len(split[5]))) +"\t" +split[1].replace(".pfm","") +"\t" +split[4] +"\t" +split[3]
else:
resultline = chr +"\t" +split[3] +"\t" +str((int(split[3]) + len(split[6]))) +"\t" +split[2].replace(".pfm","") +"\t" +split[5] +"\t" +split[4]
print resultline
|
[
"import sys\n\n\nresultline = \"\"\nfor line in sys.stdin:\n\n\tsplit = line.split(\",\")\n\tchr = split[0].split(\" \")[0]\n\n\tif split[2].isdigit():\n\n\t\tresultline = chr +\"\\t\" +split[2] +\"\\t\" +str((int(split[2]) + len(split[5]))) +\"\\t\" +split[1].replace(\".pfm\",\"\") +\"\\t\" +split[4] +\"\\t\" +split[3]\n\telse:\n\t\n\t\tresultline = chr +\"\\t\" +split[3] +\"\\t\" +str((int(split[3]) + len(split[6]))) +\"\\t\" +split[2].replace(\".pfm\",\"\") +\"\\t\" +split[5] +\"\\t\" +split[4]\n \n\tprint resultline\n"
] | true |
98,626 |
6c4f10c8d441048739e5ace60d0de29112ed5afe
|
__author__ = 'arunprasathshankar'
class FlushOutVHDLFacts(object):
def __init__(self,wl):
self.wl = wl
def printFacts(self):
facts_out = open('vhdl_facts.txt','wb')
facts_out.write('(deffacts VHDLfacts\n')
for word in self.wl:
word[0] = word[0].split(' ')
word[1] = word[1].replace('"',' ')
facts_out.write( '(word-data(word-name "' + word[1] + '")(line-number ' + word[0][3] + ')(word-number ' + word[0][5] + '))\n' )
facts_out.write(')')
facts_out.close()
|
[
"__author__ = 'arunprasathshankar'\nclass FlushOutVHDLFacts(object):\n\n def __init__(self,wl):\n self.wl = wl\n\n def printFacts(self):\n facts_out = open('vhdl_facts.txt','wb')\n facts_out.write('(deffacts VHDLfacts\\n')\n for word in self.wl:\n word[0] = word[0].split(' ')\n word[1] = word[1].replace('\"',' ')\n facts_out.write( '(word-data(word-name \"' + word[1] + '\")(line-number ' + word[0][3] + ')(word-number ' + word[0][5] + '))\\n' )\n facts_out.write(')')\n facts_out.close()\n",
"__author__ = 'arunprasathshankar'\n\n\nclass FlushOutVHDLFacts(object):\n\n def __init__(self, wl):\n self.wl = wl\n\n def printFacts(self):\n facts_out = open('vhdl_facts.txt', 'wb')\n facts_out.write('(deffacts VHDLfacts\\n')\n for word in self.wl:\n word[0] = word[0].split(' ')\n word[1] = word[1].replace('\"', ' ')\n facts_out.write('(word-data(word-name \"' + word[1] +\n '\")(line-number ' + word[0][3] + ')(word-number ' + word[0]\n [5] + '))\\n')\n facts_out.write(')')\n facts_out.close()\n",
"<assignment token>\n\n\nclass FlushOutVHDLFacts(object):\n\n def __init__(self, wl):\n self.wl = wl\n\n def printFacts(self):\n facts_out = open('vhdl_facts.txt', 'wb')\n facts_out.write('(deffacts VHDLfacts\\n')\n for word in self.wl:\n word[0] = word[0].split(' ')\n word[1] = word[1].replace('\"', ' ')\n facts_out.write('(word-data(word-name \"' + word[1] +\n '\")(line-number ' + word[0][3] + ')(word-number ' + word[0]\n [5] + '))\\n')\n facts_out.write(')')\n facts_out.close()\n",
"<assignment token>\n\n\nclass FlushOutVHDLFacts(object):\n <function token>\n\n def printFacts(self):\n facts_out = open('vhdl_facts.txt', 'wb')\n facts_out.write('(deffacts VHDLfacts\\n')\n for word in self.wl:\n word[0] = word[0].split(' ')\n word[1] = word[1].replace('\"', ' ')\n facts_out.write('(word-data(word-name \"' + word[1] +\n '\")(line-number ' + word[0][3] + ')(word-number ' + word[0]\n [5] + '))\\n')\n facts_out.write(')')\n facts_out.close()\n",
"<assignment token>\n\n\nclass FlushOutVHDLFacts(object):\n <function token>\n <function token>\n",
"<assignment token>\n<class token>\n"
] | false |
98,627 |
f91fb1aaa020747f561761169ae7d3092c690abe
|
yr = int(input("Enter the year: "))
if (yr % 4 == 0 and yr % 100 != 0) or yr % 400 == 0 :
print(f"The given year {yr} is a leap year")
else:
print(f"Not a leap year")
|
[
"yr = int(input(\"Enter the year: \"))\n\nif (yr % 4 == 0 and yr % 100 != 0) or yr % 400 == 0 :\n print(f\"The given year {yr} is a leap year\")\nelse:\n print(f\"Not a leap year\")",
"yr = int(input('Enter the year: '))\nif yr % 4 == 0 and yr % 100 != 0 or yr % 400 == 0:\n print(f'The given year {yr} is a leap year')\nelse:\n print(f'Not a leap year')\n",
"<assignment token>\nif yr % 4 == 0 and yr % 100 != 0 or yr % 400 == 0:\n print(f'The given year {yr} is a leap year')\nelse:\n print(f'Not a leap year')\n",
"<assignment token>\n<code token>\n"
] | false |
98,628 |
45fe4cc865e7fc0d19f570be39cf2497a3f3bf3a
|
total = 0
with open("input.txt") as file:
for line in file:
value = int(line)
total += max(0, (value // 3) - 2)
print(total)
|
[
"total = 0\nwith open(\"input.txt\") as file:\n for line in file:\n value = int(line)\n total += max(0, (value // 3) - 2)\nprint(total)\n",
"total = 0\nwith open('input.txt') as file:\n for line in file:\n value = int(line)\n total += max(0, value // 3 - 2)\nprint(total)\n",
"<assignment token>\nwith open('input.txt') as file:\n for line in file:\n value = int(line)\n total += max(0, value // 3 - 2)\nprint(total)\n",
"<assignment token>\n<code token>\n"
] | false |
98,629 |
7eb9726e1bbc5787bec251738990c0d23eb004cd
|
import json
f = open('record.json', 'r')
a = f.read()
f.close()
record = json.loads(a)
pId = input('Product ID: ')
pName = input('Name: ')
pType = input('Type: ')
pRating = int(input('Rating: '))
pPrice = int(input('Price: '))
pQty = int(input('Quantity: '))
pDiscount = int(input('Discount: '))
record[pId] = {'name': pName, 'type': pType, 'rating': pRating, 'price': pPrice, 'qty': pQty, 'discount': pDiscount}
a = json.dumps(record, indent = 2)
f = open('record.json', 'w')
f.write(a)
f.close()
|
[
"import json\nf = open('record.json', 'r')\na = f.read()\nf.close()\nrecord = json.loads(a)\n\npId = input('Product ID: ')\npName = input('Name: ')\npType = input('Type: ')\npRating = int(input('Rating: '))\npPrice = int(input('Price: '))\npQty = int(input('Quantity: '))\npDiscount = int(input('Discount: '))\nrecord[pId] = {'name': pName, 'type': pType, 'rating': pRating, 'price': pPrice, 'qty': pQty, 'discount': pDiscount}\n\na = json.dumps(record, indent = 2)\nf = open('record.json', 'w')\nf.write(a)\nf.close()\n",
"import json\nf = open('record.json', 'r')\na = f.read()\nf.close()\nrecord = json.loads(a)\npId = input('Product ID: ')\npName = input('Name: ')\npType = input('Type: ')\npRating = int(input('Rating: '))\npPrice = int(input('Price: '))\npQty = int(input('Quantity: '))\npDiscount = int(input('Discount: '))\nrecord[pId] = {'name': pName, 'type': pType, 'rating': pRating, 'price':\n pPrice, 'qty': pQty, 'discount': pDiscount}\na = json.dumps(record, indent=2)\nf = open('record.json', 'w')\nf.write(a)\nf.close()\n",
"<import token>\nf = open('record.json', 'r')\na = f.read()\nf.close()\nrecord = json.loads(a)\npId = input('Product ID: ')\npName = input('Name: ')\npType = input('Type: ')\npRating = int(input('Rating: '))\npPrice = int(input('Price: '))\npQty = int(input('Quantity: '))\npDiscount = int(input('Discount: '))\nrecord[pId] = {'name': pName, 'type': pType, 'rating': pRating, 'price':\n pPrice, 'qty': pQty, 'discount': pDiscount}\na = json.dumps(record, indent=2)\nf = open('record.json', 'w')\nf.write(a)\nf.close()\n",
"<import token>\n<assignment token>\nf.close()\n<assignment token>\nf.write(a)\nf.close()\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,630 |
9886e14cc76ae0425e1031ad4ee5b3ed011ab9d4
|
import time
import random
import usb_midi
import adafruit_midi
midi = adafruit_midi.MIDI(midi_out=usb_midi.ports[1], out_channel=0)
print("Midi test")
# Convert channel numbers at the presentation layer to the ones musicians use
print("Default output channel:", midi.out_channel + 1)
print("Listening on input channel:",
midi.in_channel + 1 if midi.in_channel is not None else None)
while True:
midi.note_on(44, 120)
midi.note_off(44, 120)
midi.control_change(3, 44)
midi.pitch_bend(random.randint(0, 16383))
time.sleep(1)
|
[
"import time\r\nimport random\r\nimport usb_midi\r\nimport adafruit_midi\r\n\r\nmidi = adafruit_midi.MIDI(midi_out=usb_midi.ports[1], out_channel=0)\r\n\r\nprint(\"Midi test\")\r\n\r\n# Convert channel numbers at the presentation layer to the ones musicians use\r\nprint(\"Default output channel:\", midi.out_channel + 1)\r\nprint(\"Listening on input channel:\",\r\n midi.in_channel + 1 if midi.in_channel is not None else None)\r\n\r\nwhile True:\r\n midi.note_on(44, 120)\r\n midi.note_off(44, 120)\r\n midi.control_change(3, 44)\r\n midi.pitch_bend(random.randint(0, 16383))\r\n time.sleep(1)\r\n",
"import time\nimport random\nimport usb_midi\nimport adafruit_midi\nmidi = adafruit_midi.MIDI(midi_out=usb_midi.ports[1], out_channel=0)\nprint('Midi test')\nprint('Default output channel:', midi.out_channel + 1)\nprint('Listening on input channel:', midi.in_channel + 1 if midi.in_channel\n is not None else None)\nwhile True:\n midi.note_on(44, 120)\n midi.note_off(44, 120)\n midi.control_change(3, 44)\n midi.pitch_bend(random.randint(0, 16383))\n time.sleep(1)\n",
"<import token>\nmidi = adafruit_midi.MIDI(midi_out=usb_midi.ports[1], out_channel=0)\nprint('Midi test')\nprint('Default output channel:', midi.out_channel + 1)\nprint('Listening on input channel:', midi.in_channel + 1 if midi.in_channel\n is not None else None)\nwhile True:\n midi.note_on(44, 120)\n midi.note_off(44, 120)\n midi.control_change(3, 44)\n midi.pitch_bend(random.randint(0, 16383))\n time.sleep(1)\n",
"<import token>\n<assignment token>\nprint('Midi test')\nprint('Default output channel:', midi.out_channel + 1)\nprint('Listening on input channel:', midi.in_channel + 1 if midi.in_channel\n is not None else None)\nwhile True:\n midi.note_on(44, 120)\n midi.note_off(44, 120)\n midi.control_change(3, 44)\n midi.pitch_bend(random.randint(0, 16383))\n time.sleep(1)\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
98,631 |
8d047cd41d50bcfa92a58a7ce433b196360cdc04
|
from web_helper.general_utils import GeneralUtility
class CredentialHelper():
# EmailId
random_email = GeneralUtility.email_generator()
valid_email = "[email protected]"
# Password
random_password = "Amazon" + GeneralUtility.num_string_generator(3)
valid_password = "Amazon123"
# Username
random_username = "test_amazon_" + GeneralUtility.num_string_generator(2)
username = "Test123"
|
[
"from web_helper.general_utils import GeneralUtility\n\n\nclass CredentialHelper():\n\n # EmailId\n random_email = GeneralUtility.email_generator()\n valid_email = \"[email protected]\"\n\n # Password\n random_password = \"Amazon\" + GeneralUtility.num_string_generator(3)\n valid_password = \"Amazon123\"\n\n # Username\n random_username = \"test_amazon_\" + GeneralUtility.num_string_generator(2)\n username = \"Test123\"",
"from web_helper.general_utils import GeneralUtility\n\n\nclass CredentialHelper:\n random_email = GeneralUtility.email_generator()\n valid_email = '[email protected]'\n random_password = 'Amazon' + GeneralUtility.num_string_generator(3)\n valid_password = 'Amazon123'\n random_username = 'test_amazon_' + GeneralUtility.num_string_generator(2)\n username = 'Test123'\n",
"<import token>\n\n\nclass CredentialHelper:\n random_email = GeneralUtility.email_generator()\n valid_email = '[email protected]'\n random_password = 'Amazon' + GeneralUtility.num_string_generator(3)\n valid_password = 'Amazon123'\n random_username = 'test_amazon_' + GeneralUtility.num_string_generator(2)\n username = 'Test123'\n",
"<import token>\n\n\nclass CredentialHelper:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
98,632 |
6269aa9574528765d064beb98536800196183de9
|
import os, sys, glob, numpy as np
sys.path.insert(0, "/SNS/users/lj7/dv")
from PyChop import PyChop2
instrument = PyChop2('cncs.yaml')
instrument.moderator.mod_pars = [0, 0, 0]
instrument.setChopper('High Resolution')
print instrument.getResFlux(Etrans=0, Ei_in=1)
|
[
"import os, sys, glob, numpy as np\n\nsys.path.insert(0, \"/SNS/users/lj7/dv\")\n\nfrom PyChop import PyChop2\ninstrument = PyChop2('cncs.yaml')\ninstrument.moderator.mod_pars = [0, 0, 0]\ninstrument.setChopper('High Resolution')\nprint instrument.getResFlux(Etrans=0, Ei_in=1)\n"
] | true |
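Record 98,632 is flagged error: true, presumably because the bare print statement (print instrument.getResFlux(...)) is Python 2 syntax and the sample does not parse as Python 3. A hedged Python 3 rendering, not part of the stored record: only the print call changes, the PyChop2 calls are kept exactly as in the record, and the unused os/glob/numpy imports are dropped.

import sys

sys.path.insert(0, '/SNS/users/lj7/dv')
from PyChop import PyChop2

# Same instrument setup as the record, with a Python 3 print call.
instrument = PyChop2('cncs.yaml')
instrument.moderator.mod_pars = [0, 0, 0]
instrument.setChopper('High Resolution')
print(instrument.getResFlux(Etrans=0, Ei_in=1))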
98,633 |
2d919381546395c64581052622e1c7f0331dfaf7
|
'''
Created on Oct 29, 2011
@author: t22cf3p
'''
from xml.dom import minidom
import urllib, httplib, base64
import sys
if __name__ == '__main__':
pass
auth = base64.encodestring('%s:%s' % ("vmrest", "1qazXSW@"))[:-1]
headers = { "Host":"172.31.25.101",
"Authorization": "Basic %s" % auth,
"User-Agent": "NYLSPlunkREST"
}
page=1
numUsers=-1
conn = httplib.HTTPS("172.31.25.101:8443")
conn.set_debuglevel(1)
while True:
if numUsers != 1 and page * 100 > numUsers:
break;
conn.putrequest("GET", "/vmrest/users?rowsPerPage=100&pageNumber="+page.__str__())
conn.putheader("Host","172.31.25.101")
conn.putheader("Authorization","Basic dm1yZXN0OjFxYXpYU1dA")
conn.putheader("User-Agent", "NYLSPlunkREST")
conn.endheaders()
errcode, errmsg, headers = conn.getreply()
++page
if errcode == 200:
xmldoc = minidom.parseString(conn.getfile().read().strip())
if(numUsers==-1):
userField = xmldoc.getElementsByTagName("Users")
userCount = int(userField.item(0).attributes.item(0).value)
print userCount
for userNode in xmldoc.getElementsByTagName("User"):
alias = ""
IsVmEnrolled =""
IsSetForVmEnrollment=""
for userInfo in userNode.childNodes:
if userInfo.nodeName == "Alias":
alias = userInfo.childNodes.item(0).nodeValue
elif userInfo.nodeName == "IsVmEnrolled":
IsVmEnrolled= userInfo.childNodes.item(0).nodeValue
elif userInfo.nodeName == "IsSetForVmEnrollment":
IsSetForVmEnrollment= userInfo.childNodes.item(0).nodeValue
print alias+","+IsVmEnrolled+","+IsSetForVmEnrollment
break;
#Users total="3849"
|
[
"'''\r\nCreated on Oct 29, 2011\r\n\r\n@author: t22cf3p\r\n'''\r\nfrom xml.dom import minidom\r\nimport urllib, httplib, base64\r\nimport sys\r\n\r\nif __name__ == '__main__':\r\n pass\r\n\r\nauth = base64.encodestring('%s:%s' % (\"vmrest\", \"1qazXSW@\"))[:-1]\r\nheaders = { \"Host\":\"172.31.25.101\", \r\n \"Authorization\": \"Basic %s\" % auth,\r\n \"User-Agent\": \"NYLSPlunkREST\"\r\n }\r\n\r\n\r\npage=1\r\nnumUsers=-1\r\n\r\nconn = httplib.HTTPS(\"172.31.25.101:8443\")\r\nconn.set_debuglevel(1) \r\n\r\nwhile True:\r\n if numUsers != 1 and page * 100 > numUsers:\r\n break;\r\n \r\n conn.putrequest(\"GET\", \"/vmrest/users?rowsPerPage=100&pageNumber=\"+page.__str__())\r\n conn.putheader(\"Host\",\"172.31.25.101\")\r\n conn.putheader(\"Authorization\",\"Basic dm1yZXN0OjFxYXpYU1dA\")\r\n conn.putheader(\"User-Agent\", \"NYLSPlunkREST\")\r\n conn.endheaders()\r\n errcode, errmsg, headers = conn.getreply()\r\n ++page\r\n \r\n if errcode == 200:\r\n xmldoc = minidom.parseString(conn.getfile().read().strip())\r\n if(numUsers==-1):\r\n userField = xmldoc.getElementsByTagName(\"Users\")\r\n userCount = int(userField.item(0).attributes.item(0).value)\r\n print userCount\r\n for userNode in xmldoc.getElementsByTagName(\"User\"):\r\n alias = \"\"\r\n IsVmEnrolled =\"\" \r\n IsSetForVmEnrollment=\"\"\r\n \r\n for userInfo in userNode.childNodes:\r\n if userInfo.nodeName == \"Alias\":\r\n alias = userInfo.childNodes.item(0).nodeValue\r\n elif userInfo.nodeName == \"IsVmEnrolled\":\r\n IsVmEnrolled= userInfo.childNodes.item(0).nodeValue\r\n elif userInfo.nodeName == \"IsSetForVmEnrollment\":\r\n IsSetForVmEnrollment= userInfo.childNodes.item(0).nodeValue\r\n print alias+\",\"+IsVmEnrolled+\",\"+IsSetForVmEnrollment\r\n break;\r\n \r\n #Users total=\"3849\"\r\n\r\n"
] | true |
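Record 98,633 is likewise flagged error: true (Python 2 print statements, httplib, and a ++page that does not increment anything in Python). A rough Python 3 sketch of the same paginated /vmrest/users fetch with basic auth, using http.client and base64.b64encode; this is not the author's code: the pagination condition is simplified relative to the record, certificate verification is not handled, and the host and credentials are simply copied from the record.

import base64
import http.client
from xml.dom import minidom

auth = base64.b64encode(b'vmrest:1qazXSW@').decode()
headers = {'Authorization': 'Basic ' + auth, 'User-Agent': 'NYLSPlunkREST'}

conn = http.client.HTTPSConnection('172.31.25.101', 8443)  # cert handling omitted
page, num_users = 1, -1
while num_users == -1 or (page - 1) * 100 < num_users:
    conn.request('GET', '/vmrest/users?rowsPerPage=100&pageNumber=%d' % page,
                 headers=headers)
    resp = conn.getresponse()
    if resp.status != 200:
        break
    doc = minidom.parseString(resp.read().strip())
    if num_users == -1:
        # The record reads the user count from the first attribute of <Users>.
        num_users = int(doc.getElementsByTagName('Users')[0].attributes.item(0).value)
    for user in doc.getElementsByTagName('User'):
        fields = {n.nodeName: n.firstChild.nodeValue
                  for n in user.childNodes if n.firstChild}
        print(fields.get('Alias', ''), fields.get('IsVmEnrolled', ''),
              fields.get('IsSetForVmEnrollment', ''))
    page += 1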
98,634 |
8d43a60a692dfd1dd1a5aa15adc0b9514c5277a8
|
from maze.class_game.position import Position
# -tc- ne pas utiliser d'étoile dans les imports. Ce n'est pas
# -tc- conforme à la PEP8
from maze.constants import *
from random import sample
class Maze:
def __init__(self, filename):
"""Ajouter une docstring"""
self.filename = filename
self.start = ()
self.end = ()
self.paths = []
# -tc- plutôt self.walls = [[], [], [], [], [], [], [], [], [], []]
# -tc- ce qui te permettra de faire un self.walls[0].append(...)
self.wall0 = []
self.wall1 = []
self.wall2 = []
self.wall3 = []
self.wall4 = []
self.wall5 = []
self.wall6 = []
self.wall7 = []
self.wall8 = []
self.wall9 = []
self.objects_to_find = []
def is_path_position(self, position):
"""Ajouter une docstring"""
return position in self.paths
def load_from_file(self):
"""Fonction qui permet de charger la carte du labyrinthe"""
with open(self.filename) as infile:
for x, line in enumerate(infile):
for y, c in enumerate(line):
if c == path_char:
self.paths.append(Position(y * size_sprite, x * size_sprite))
elif c == start_char:
self.start = Position(y * size_sprite, x * size_sprite)
self.paths.append(Position(y * size_sprite, x * size_sprite))
elif c == end_char:
self.end = Position(y * size_sprite, x * size_sprite)
self.paths.append(Position(y * size_sprite, x * size_sprite))
elif c == '0':
self.wall0.append(Position(y * size_sprite, x * size_sprite))
elif c == '1':
self.wall1.append(Position(y * size_sprite, x * size_sprite))
elif c == '2':
self.wall2.append(Position(y * size_sprite, x * size_sprite))
elif c == '3':
self.wall3.append(Position(y * size_sprite, x * size_sprite))
elif c == '4':
self.wall4.append(Position(y * size_sprite, x * size_sprite))
elif c == '5':
self.wall5.append(Position(y * size_sprite, x * size_sprite))
elif c == '6':
self.wall6.append(Position(y * size_sprite, x * size_sprite))
elif c == '7':
self.wall7.append(Position(y * size_sprite, x * size_sprite))
elif c == '8':
self.wall8.append(Position(y * size_sprite, x * size_sprite))
elif c == '9':
self.wall9.append(Position(y * size_sprite, x * size_sprite))
# -tc- Le placement aléatoire des objets se fait bien une seule fois,
# -tc- je ne vois pas de soucis ici
self.objects_to_find = sample(self.paths, 3)
# -tc- Ne pas utiliser print pour débugger mais un debugger
print(self.paths)
# -tc- return inutile et pas utilisé. Ce n'est pas comme cela qu'on procède pour retourner
# -tc- plusieurs valeurs.
return self.paths and self.wall0 and self.wall1 and self.wall2 and self.wall3 and self.wall4 and self.wall5 and self.wall6 and self.wall7 and self.wall8 and self.wall9 and self.objects_to_find and self.start and self.end
def display(self, window_name, wall0_name, wall1_name, wall2_name, wall3_name,
wall4_name, wall5_name, wall6_name, wall7_name, wall8_name, wall9_name, needle_name, tube_name,
ether_name):
"""Ajouter une docstring"""
# -tc- Si self.walls est une liste de listes, cela te permet de tout parcourir
# -tc- avec une double boucle plutôt que de créer 10 boucles
for position in self.wall0:
window_name.blit(wall0_name, position.position)
for position in self.wall1:
window_name.blit(wall1_name, position.position)
for position in self.wall2:
window_name.blit(wall2_name, position.position)
for position in self.wall3:
window_name.blit(wall3_name, position.position)
for position in self.wall4:
window_name.blit(wall4_name, position.position)
for position in self.wall5:
window_name.blit(wall5_name, position.position)
for position in self.wall6:
window_name.blit(wall6_name, position.position)
for position in self.wall7:
window_name.blit(wall7_name, position.position)
for position in self.wall8:
window_name.blit(wall8_name, position.position)
for position in self.wall9:
window_name.blit(wall9_name, position.position)
objects = [needle_name, tube_name, ether_name]
# -tc- utiliser enumerate() plutôt que de devoir maintenir une variable x
# -tc- de position.
x = 0
for position in self.objects_to_find:
window_name.blit(objects[x], position.position)
x += 1
def main():
level = Maze("Maze/level/level1")
level.load_from_file()
# [(0, 0), (30, 0), (60, 0), (120, 0), (150, 0), (180, 0), (210, 0), (240, 0), (300, 0), (330, 0), (360, 0),
# (390, 0), (60, 30), (120, 30), (300, 30), (390, 30), (0, 60), (30, 60), (60, 60), (90, 60), (120, 60), (180, 60),
# (210, 60), (240, 60), (300, 60), (360, 60), (390, 60), (0, 90), (120, 90), (180, 90), (240, 90), (270, 90),
# (300, 90), (360, 90), (60, 120), (90, 120), (120, 120), (180, 120), (360, 120), (390, 120), (420, 120), (0, 150),
# (60, 150), (180, 150), (240, 150), (270, 150), (300, 150), (420, 150), (0, 180), (60, 180), (90, 180), (120, 180),
# (150, 180), (180, 180), (240, 180), (300, 180), (330, 180), (360, 180), (390, 180), (420, 180), (0, 210),
# (30, 210), (60, 210), (120, 210), (240, 210), (390, 210), (120, 240), (180, 240), (210, 240), (240, 240),
# (270, 240), (330, 240), (390, 240), (30, 270), (60, 270), (90, 270), (120, 270), (180, 270), (270, 270),
# (330, 270), (360, 270), (390, 270), (30, 300), (120, 300), (180, 300), (210, 300), (270, 300), (390, 300),
# (420, 300), (30, 330), (60, 330), (90, 330), (120, 330), (210, 330), (270, 330), (300, 330), (330, 330),
# (420, 330), (180, 360), (210, 360), (330, 360), (360, 360), (390, 360), (0, 390), (30, 390), (60, 390), (90, 390),
# (120, 390), (180, 390), (240, 390), (270, 390), (300, 390), (330, 390), (390, 390), (420, 390), (0, 420),
# (120, 420), (150, 420), (180, 420), (240, 420), (420, 420)]
p = Position(0, 0).right()
print(p)
print(level.is_path_position(p))
if __name__ == "__main__":
main()
|
[
"from maze.class_game.position import Position\n# -tc- ne pas utiliser d'étoile dans les imports. Ce n'est pas\n# -tc- conforme à la PEP8\nfrom maze.constants import *\nfrom random import sample\n\n\nclass Maze:\n\n def __init__(self, filename):\n \"\"\"Ajouter une docstring\"\"\"\n self.filename = filename\n self.start = ()\n self.end = ()\n self.paths = []\n # -tc- plutôt self.walls = [[], [], [], [], [], [], [], [], [], []]\n # -tc- ce qui te permettra de faire un self.walls[0].append(...)\n self.wall0 = []\n self.wall1 = []\n self.wall2 = []\n self.wall3 = []\n self.wall4 = []\n self.wall5 = []\n self.wall6 = []\n self.wall7 = []\n self.wall8 = []\n self.wall9 = []\n self.objects_to_find = []\n\n def is_path_position(self, position):\n \"\"\"Ajouter une docstring\"\"\"\n return position in self.paths\n\n def load_from_file(self):\n \"\"\"Fonction qui permet de charger la carte du labyrinthe\"\"\"\n with open(self.filename) as infile:\n for x, line in enumerate(infile):\n for y, c in enumerate(line):\n if c == path_char:\n self.paths.append(Position(y * size_sprite, x * size_sprite))\n elif c == start_char:\n self.start = Position(y * size_sprite, x * size_sprite)\n self.paths.append(Position(y * size_sprite, x * size_sprite))\n elif c == end_char:\n self.end = Position(y * size_sprite, x * size_sprite)\n self.paths.append(Position(y * size_sprite, x * size_sprite))\n elif c == '0':\n self.wall0.append(Position(y * size_sprite, x * size_sprite))\n elif c == '1':\n self.wall1.append(Position(y * size_sprite, x * size_sprite))\n elif c == '2':\n self.wall2.append(Position(y * size_sprite, x * size_sprite))\n elif c == '3':\n self.wall3.append(Position(y * size_sprite, x * size_sprite))\n elif c == '4':\n self.wall4.append(Position(y * size_sprite, x * size_sprite))\n elif c == '5':\n self.wall5.append(Position(y * size_sprite, x * size_sprite))\n elif c == '6':\n self.wall6.append(Position(y * size_sprite, x * size_sprite))\n elif c == '7':\n self.wall7.append(Position(y * size_sprite, x * size_sprite))\n elif c == '8':\n self.wall8.append(Position(y * size_sprite, x * size_sprite))\n elif c == '9':\n self.wall9.append(Position(y * size_sprite, x * size_sprite))\n # -tc- Le placement aléatoire des objets se fait bien une seule fois,\n # -tc- je ne vois pas de soucis ici\n self.objects_to_find = sample(self.paths, 3)\n # -tc- Ne pas utiliser print pour débugger mais un debugger\n print(self.paths)\n\n # -tc- return inutile et pas utilisé. 
Ce n'est pas comme cela qu'on procède pour retourner \n # -tc- plusieurs valeurs.\n return self.paths and self.wall0 and self.wall1 and self.wall2 and self.wall3 and self.wall4 and self.wall5 and self.wall6 and self.wall7 and self.wall8 and self.wall9 and self.objects_to_find and self.start and self.end\n\n def display(self, window_name, wall0_name, wall1_name, wall2_name, wall3_name,\n wall4_name, wall5_name, wall6_name, wall7_name, wall8_name, wall9_name, needle_name, tube_name,\n ether_name):\n \"\"\"Ajouter une docstring\"\"\"\n\n # -tc- Si self.walls est une liste de listes, cela te permet de tout parcourir\n # -tc- avec une double boucle plutôt que de créer 10 boucles\n for position in self.wall0:\n window_name.blit(wall0_name, position.position)\n for position in self.wall1:\n window_name.blit(wall1_name, position.position)\n for position in self.wall2:\n window_name.blit(wall2_name, position.position)\n for position in self.wall3:\n window_name.blit(wall3_name, position.position)\n for position in self.wall4:\n window_name.blit(wall4_name, position.position)\n for position in self.wall5:\n window_name.blit(wall5_name, position.position)\n for position in self.wall6:\n window_name.blit(wall6_name, position.position)\n for position in self.wall7:\n window_name.blit(wall7_name, position.position)\n for position in self.wall8:\n window_name.blit(wall8_name, position.position)\n for position in self.wall9:\n window_name.blit(wall9_name, position.position)\n objects = [needle_name, tube_name, ether_name]\n # -tc- utiliser enumerate() plutôt que de devoir maintenir une variable x\n # -tc- de position.\n x = 0\n for position in self.objects_to_find:\n window_name.blit(objects[x], position.position)\n x += 1\n\n\ndef main():\n level = Maze(\"Maze/level/level1\")\n level.load_from_file()\n # [(0, 0), (30, 0), (60, 0), (120, 0), (150, 0), (180, 0), (210, 0), (240, 0), (300, 0), (330, 0), (360, 0),\n # (390, 0), (60, 30), (120, 30), (300, 30), (390, 30), (0, 60), (30, 60), (60, 60), (90, 60), (120, 60), (180, 60),\n # (210, 60), (240, 60), (300, 60), (360, 60), (390, 60), (0, 90), (120, 90), (180, 90), (240, 90), (270, 90),\n # (300, 90), (360, 90), (60, 120), (90, 120), (120, 120), (180, 120), (360, 120), (390, 120), (420, 120), (0, 150),\n # (60, 150), (180, 150), (240, 150), (270, 150), (300, 150), (420, 150), (0, 180), (60, 180), (90, 180), (120, 180),\n # (150, 180), (180, 180), (240, 180), (300, 180), (330, 180), (360, 180), (390, 180), (420, 180), (0, 210),\n # (30, 210), (60, 210), (120, 210), (240, 210), (390, 210), (120, 240), (180, 240), (210, 240), (240, 240),\n # (270, 240), (330, 240), (390, 240), (30, 270), (60, 270), (90, 270), (120, 270), (180, 270), (270, 270),\n # (330, 270), (360, 270), (390, 270), (30, 300), (120, 300), (180, 300), (210, 300), (270, 300), (390, 300),\n # (420, 300), (30, 330), (60, 330), (90, 330), (120, 330), (210, 330), (270, 330), (300, 330), (330, 330),\n # (420, 330), (180, 360), (210, 360), (330, 360), (360, 360), (390, 360), (0, 390), (30, 390), (60, 390), (90, 390),\n # (120, 390), (180, 390), (240, 390), (270, 390), (300, 390), (330, 390), (390, 390), (420, 390), (0, 420),\n # (120, 420), (150, 420), (180, 420), (240, 420), (420, 420)]\n p = Position(0, 0).right()\n print(p)\n print(level.is_path_position(p))\n\n\nif __name__ == \"__main__\":\n main()\n",
"from maze.class_game.position import Position\nfrom maze.constants import *\nfrom random import sample\n\n\nclass Maze:\n\n def __init__(self, filename):\n \"\"\"Ajouter une docstring\"\"\"\n self.filename = filename\n self.start = ()\n self.end = ()\n self.paths = []\n self.wall0 = []\n self.wall1 = []\n self.wall2 = []\n self.wall3 = []\n self.wall4 = []\n self.wall5 = []\n self.wall6 = []\n self.wall7 = []\n self.wall8 = []\n self.wall9 = []\n self.objects_to_find = []\n\n def is_path_position(self, position):\n \"\"\"Ajouter une docstring\"\"\"\n return position in self.paths\n\n def load_from_file(self):\n \"\"\"Fonction qui permet de charger la carte du labyrinthe\"\"\"\n with open(self.filename) as infile:\n for x, line in enumerate(infile):\n for y, c in enumerate(line):\n if c == path_char:\n self.paths.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == start_char:\n self.start = Position(y * size_sprite, x * size_sprite)\n self.paths.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == end_char:\n self.end = Position(y * size_sprite, x * size_sprite)\n self.paths.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '0':\n self.wall0.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '1':\n self.wall1.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '2':\n self.wall2.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '3':\n self.wall3.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '4':\n self.wall4.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '5':\n self.wall5.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '6':\n self.wall6.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '7':\n self.wall7.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '8':\n self.wall8.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '9':\n self.wall9.append(Position(y * size_sprite, x *\n size_sprite))\n self.objects_to_find = sample(self.paths, 3)\n print(self.paths)\n return (self.paths and self.wall0 and self.wall1 and self.wall2 and\n self.wall3 and self.wall4 and self.wall5 and self.wall6 and\n self.wall7 and self.wall8 and self.wall9 and self.\n objects_to_find and self.start and self.end)\n\n def display(self, window_name, wall0_name, wall1_name, wall2_name,\n wall3_name, wall4_name, wall5_name, wall6_name, wall7_name,\n wall8_name, wall9_name, needle_name, tube_name, ether_name):\n \"\"\"Ajouter une docstring\"\"\"\n for position in self.wall0:\n window_name.blit(wall0_name, position.position)\n for position in self.wall1:\n window_name.blit(wall1_name, position.position)\n for position in self.wall2:\n window_name.blit(wall2_name, position.position)\n for position in self.wall3:\n window_name.blit(wall3_name, position.position)\n for position in self.wall4:\n window_name.blit(wall4_name, position.position)\n for position in self.wall5:\n window_name.blit(wall5_name, position.position)\n for position in self.wall6:\n window_name.blit(wall6_name, position.position)\n for position in self.wall7:\n window_name.blit(wall7_name, position.position)\n for position in self.wall8:\n window_name.blit(wall8_name, position.position)\n for position in self.wall9:\n window_name.blit(wall9_name, position.position)\n objects = [needle_name, tube_name, ether_name]\n x = 0\n for position in self.objects_to_find:\n window_name.blit(objects[x], position.position)\n x += 1\n\n\ndef main():\n level = Maze('Maze/level/level1')\n 
level.load_from_file()\n p = Position(0, 0).right()\n print(p)\n print(level.is_path_position(p))\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n\n\nclass Maze:\n\n def __init__(self, filename):\n \"\"\"Ajouter une docstring\"\"\"\n self.filename = filename\n self.start = ()\n self.end = ()\n self.paths = []\n self.wall0 = []\n self.wall1 = []\n self.wall2 = []\n self.wall3 = []\n self.wall4 = []\n self.wall5 = []\n self.wall6 = []\n self.wall7 = []\n self.wall8 = []\n self.wall9 = []\n self.objects_to_find = []\n\n def is_path_position(self, position):\n \"\"\"Ajouter une docstring\"\"\"\n return position in self.paths\n\n def load_from_file(self):\n \"\"\"Fonction qui permet de charger la carte du labyrinthe\"\"\"\n with open(self.filename) as infile:\n for x, line in enumerate(infile):\n for y, c in enumerate(line):\n if c == path_char:\n self.paths.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == start_char:\n self.start = Position(y * size_sprite, x * size_sprite)\n self.paths.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == end_char:\n self.end = Position(y * size_sprite, x * size_sprite)\n self.paths.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '0':\n self.wall0.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '1':\n self.wall1.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '2':\n self.wall2.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '3':\n self.wall3.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '4':\n self.wall4.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '5':\n self.wall5.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '6':\n self.wall6.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '7':\n self.wall7.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '8':\n self.wall8.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '9':\n self.wall9.append(Position(y * size_sprite, x *\n size_sprite))\n self.objects_to_find = sample(self.paths, 3)\n print(self.paths)\n return (self.paths and self.wall0 and self.wall1 and self.wall2 and\n self.wall3 and self.wall4 and self.wall5 and self.wall6 and\n self.wall7 and self.wall8 and self.wall9 and self.\n objects_to_find and self.start and self.end)\n\n def display(self, window_name, wall0_name, wall1_name, wall2_name,\n wall3_name, wall4_name, wall5_name, wall6_name, wall7_name,\n wall8_name, wall9_name, needle_name, tube_name, ether_name):\n \"\"\"Ajouter une docstring\"\"\"\n for position in self.wall0:\n window_name.blit(wall0_name, position.position)\n for position in self.wall1:\n window_name.blit(wall1_name, position.position)\n for position in self.wall2:\n window_name.blit(wall2_name, position.position)\n for position in self.wall3:\n window_name.blit(wall3_name, position.position)\n for position in self.wall4:\n window_name.blit(wall4_name, position.position)\n for position in self.wall5:\n window_name.blit(wall5_name, position.position)\n for position in self.wall6:\n window_name.blit(wall6_name, position.position)\n for position in self.wall7:\n window_name.blit(wall7_name, position.position)\n for position in self.wall8:\n window_name.blit(wall8_name, position.position)\n for position in self.wall9:\n window_name.blit(wall9_name, position.position)\n objects = [needle_name, tube_name, ether_name]\n x = 0\n for position in self.objects_to_find:\n window_name.blit(objects[x], position.position)\n x += 1\n\n\ndef main():\n level = Maze('Maze/level/level1')\n level.load_from_file()\n p = Position(0, 0).right()\n print(p)\n 
print(level.is_path_position(p))\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n\n\nclass Maze:\n\n def __init__(self, filename):\n \"\"\"Ajouter une docstring\"\"\"\n self.filename = filename\n self.start = ()\n self.end = ()\n self.paths = []\n self.wall0 = []\n self.wall1 = []\n self.wall2 = []\n self.wall3 = []\n self.wall4 = []\n self.wall5 = []\n self.wall6 = []\n self.wall7 = []\n self.wall8 = []\n self.wall9 = []\n self.objects_to_find = []\n\n def is_path_position(self, position):\n \"\"\"Ajouter une docstring\"\"\"\n return position in self.paths\n\n def load_from_file(self):\n \"\"\"Fonction qui permet de charger la carte du labyrinthe\"\"\"\n with open(self.filename) as infile:\n for x, line in enumerate(infile):\n for y, c in enumerate(line):\n if c == path_char:\n self.paths.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == start_char:\n self.start = Position(y * size_sprite, x * size_sprite)\n self.paths.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == end_char:\n self.end = Position(y * size_sprite, x * size_sprite)\n self.paths.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '0':\n self.wall0.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '1':\n self.wall1.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '2':\n self.wall2.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '3':\n self.wall3.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '4':\n self.wall4.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '5':\n self.wall5.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '6':\n self.wall6.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '7':\n self.wall7.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '8':\n self.wall8.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '9':\n self.wall9.append(Position(y * size_sprite, x *\n size_sprite))\n self.objects_to_find = sample(self.paths, 3)\n print(self.paths)\n return (self.paths and self.wall0 and self.wall1 and self.wall2 and\n self.wall3 and self.wall4 and self.wall5 and self.wall6 and\n self.wall7 and self.wall8 and self.wall9 and self.\n objects_to_find and self.start and self.end)\n\n def display(self, window_name, wall0_name, wall1_name, wall2_name,\n wall3_name, wall4_name, wall5_name, wall6_name, wall7_name,\n wall8_name, wall9_name, needle_name, tube_name, ether_name):\n \"\"\"Ajouter une docstring\"\"\"\n for position in self.wall0:\n window_name.blit(wall0_name, position.position)\n for position in self.wall1:\n window_name.blit(wall1_name, position.position)\n for position in self.wall2:\n window_name.blit(wall2_name, position.position)\n for position in self.wall3:\n window_name.blit(wall3_name, position.position)\n for position in self.wall4:\n window_name.blit(wall4_name, position.position)\n for position in self.wall5:\n window_name.blit(wall5_name, position.position)\n for position in self.wall6:\n window_name.blit(wall6_name, position.position)\n for position in self.wall7:\n window_name.blit(wall7_name, position.position)\n for position in self.wall8:\n window_name.blit(wall8_name, position.position)\n for position in self.wall9:\n window_name.blit(wall9_name, position.position)\n objects = [needle_name, tube_name, ether_name]\n x = 0\n for position in self.objects_to_find:\n window_name.blit(objects[x], position.position)\n x += 1\n\n\ndef main():\n level = Maze('Maze/level/level1')\n level.load_from_file()\n p = Position(0, 0).right()\n print(p)\n 
print(level.is_path_position(p))\n\n\n<code token>\n",
"<import token>\n\n\nclass Maze:\n\n def __init__(self, filename):\n \"\"\"Ajouter une docstring\"\"\"\n self.filename = filename\n self.start = ()\n self.end = ()\n self.paths = []\n self.wall0 = []\n self.wall1 = []\n self.wall2 = []\n self.wall3 = []\n self.wall4 = []\n self.wall5 = []\n self.wall6 = []\n self.wall7 = []\n self.wall8 = []\n self.wall9 = []\n self.objects_to_find = []\n\n def is_path_position(self, position):\n \"\"\"Ajouter une docstring\"\"\"\n return position in self.paths\n\n def load_from_file(self):\n \"\"\"Fonction qui permet de charger la carte du labyrinthe\"\"\"\n with open(self.filename) as infile:\n for x, line in enumerate(infile):\n for y, c in enumerate(line):\n if c == path_char:\n self.paths.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == start_char:\n self.start = Position(y * size_sprite, x * size_sprite)\n self.paths.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == end_char:\n self.end = Position(y * size_sprite, x * size_sprite)\n self.paths.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '0':\n self.wall0.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '1':\n self.wall1.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '2':\n self.wall2.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '3':\n self.wall3.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '4':\n self.wall4.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '5':\n self.wall5.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '6':\n self.wall6.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '7':\n self.wall7.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '8':\n self.wall8.append(Position(y * size_sprite, x *\n size_sprite))\n elif c == '9':\n self.wall9.append(Position(y * size_sprite, x *\n size_sprite))\n self.objects_to_find = sample(self.paths, 3)\n print(self.paths)\n return (self.paths and self.wall0 and self.wall1 and self.wall2 and\n self.wall3 and self.wall4 and self.wall5 and self.wall6 and\n self.wall7 and self.wall8 and self.wall9 and self.\n objects_to_find and self.start and self.end)\n\n def display(self, window_name, wall0_name, wall1_name, wall2_name,\n wall3_name, wall4_name, wall5_name, wall6_name, wall7_name,\n wall8_name, wall9_name, needle_name, tube_name, ether_name):\n \"\"\"Ajouter une docstring\"\"\"\n for position in self.wall0:\n window_name.blit(wall0_name, position.position)\n for position in self.wall1:\n window_name.blit(wall1_name, position.position)\n for position in self.wall2:\n window_name.blit(wall2_name, position.position)\n for position in self.wall3:\n window_name.blit(wall3_name, position.position)\n for position in self.wall4:\n window_name.blit(wall4_name, position.position)\n for position in self.wall5:\n window_name.blit(wall5_name, position.position)\n for position in self.wall6:\n window_name.blit(wall6_name, position.position)\n for position in self.wall7:\n window_name.blit(wall7_name, position.position)\n for position in self.wall8:\n window_name.blit(wall8_name, position.position)\n for position in self.wall9:\n window_name.blit(wall9_name, position.position)\n objects = [needle_name, tube_name, ether_name]\n x = 0\n for position in self.objects_to_find:\n window_name.blit(objects[x], position.position)\n x += 1\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass Maze:\n\n def __init__(self, filename):\n \"\"\"Ajouter une docstring\"\"\"\n self.filename = filename\n self.start = ()\n self.end = ()\n self.paths = []\n self.wall0 = []\n self.wall1 = []\n self.wall2 = []\n self.wall3 = []\n self.wall4 = []\n self.wall5 = []\n self.wall6 = []\n self.wall7 = []\n self.wall8 = []\n self.wall9 = []\n self.objects_to_find = []\n\n def is_path_position(self, position):\n \"\"\"Ajouter une docstring\"\"\"\n return position in self.paths\n <function token>\n\n def display(self, window_name, wall0_name, wall1_name, wall2_name,\n wall3_name, wall4_name, wall5_name, wall6_name, wall7_name,\n wall8_name, wall9_name, needle_name, tube_name, ether_name):\n \"\"\"Ajouter une docstring\"\"\"\n for position in self.wall0:\n window_name.blit(wall0_name, position.position)\n for position in self.wall1:\n window_name.blit(wall1_name, position.position)\n for position in self.wall2:\n window_name.blit(wall2_name, position.position)\n for position in self.wall3:\n window_name.blit(wall3_name, position.position)\n for position in self.wall4:\n window_name.blit(wall4_name, position.position)\n for position in self.wall5:\n window_name.blit(wall5_name, position.position)\n for position in self.wall6:\n window_name.blit(wall6_name, position.position)\n for position in self.wall7:\n window_name.blit(wall7_name, position.position)\n for position in self.wall8:\n window_name.blit(wall8_name, position.position)\n for position in self.wall9:\n window_name.blit(wall9_name, position.position)\n objects = [needle_name, tube_name, ether_name]\n x = 0\n for position in self.objects_to_find:\n window_name.blit(objects[x], position.position)\n x += 1\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass Maze:\n\n def __init__(self, filename):\n \"\"\"Ajouter une docstring\"\"\"\n self.filename = filename\n self.start = ()\n self.end = ()\n self.paths = []\n self.wall0 = []\n self.wall1 = []\n self.wall2 = []\n self.wall3 = []\n self.wall4 = []\n self.wall5 = []\n self.wall6 = []\n self.wall7 = []\n self.wall8 = []\n self.wall9 = []\n self.objects_to_find = []\n <function token>\n <function token>\n\n def display(self, window_name, wall0_name, wall1_name, wall2_name,\n wall3_name, wall4_name, wall5_name, wall6_name, wall7_name,\n wall8_name, wall9_name, needle_name, tube_name, ether_name):\n \"\"\"Ajouter une docstring\"\"\"\n for position in self.wall0:\n window_name.blit(wall0_name, position.position)\n for position in self.wall1:\n window_name.blit(wall1_name, position.position)\n for position in self.wall2:\n window_name.blit(wall2_name, position.position)\n for position in self.wall3:\n window_name.blit(wall3_name, position.position)\n for position in self.wall4:\n window_name.blit(wall4_name, position.position)\n for position in self.wall5:\n window_name.blit(wall5_name, position.position)\n for position in self.wall6:\n window_name.blit(wall6_name, position.position)\n for position in self.wall7:\n window_name.blit(wall7_name, position.position)\n for position in self.wall8:\n window_name.blit(wall8_name, position.position)\n for position in self.wall9:\n window_name.blit(wall9_name, position.position)\n objects = [needle_name, tube_name, ether_name]\n x = 0\n for position in self.objects_to_find:\n window_name.blit(objects[x], position.position)\n x += 1\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass Maze:\n\n def __init__(self, filename):\n \"\"\"Ajouter une docstring\"\"\"\n self.filename = filename\n self.start = ()\n self.end = ()\n self.paths = []\n self.wall0 = []\n self.wall1 = []\n self.wall2 = []\n self.wall3 = []\n self.wall4 = []\n self.wall5 = []\n self.wall6 = []\n self.wall7 = []\n self.wall8 = []\n self.wall9 = []\n self.objects_to_find = []\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass Maze:\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n<class token>\n<function token>\n<code token>\n"
] | false |
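The -tc- review comments embedded in record 98,634 suggest two refactors: replace the ten wallN attributes with a single list of lists, and use enumerate() in display() instead of a hand-maintained x counter. A minimal sketch of those two suggestions (not part of the stored record; Position and size_sprite are assumed to be importable from the record's own modules, per its star import):

from maze.class_game.position import Position
from maze.constants import size_sprite  # assumed name; the record used a star import


class Maze:

    def __init__(self, filename):
        self.filename = filename
        self.paths = []
        # self.walls[3] plays the role of the record's self.wall3, and so on.
        self.walls = [[] for _ in range(10)]
        self.objects_to_find = []

    def add_wall(self, digit, x, y):
        # digit is the '0'..'9' character read from the level file.
        self.walls[int(digit)].append(Position(y * size_sprite, x * size_sprite))

    def display(self, window, wall_sprites, object_sprites):
        # One double loop replaces the ten near-identical loops of the record.
        for sprite, positions in zip(wall_sprites, self.walls):
            for position in positions:
                window.blit(sprite, position.position)
        # enumerate() replaces the hand-maintained x counter.
        for i, position in enumerate(self.objects_to_find):
            window.blit(object_sprites[i], position.position)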
98,635 |
603f4f69548b8ce5ac67d6e282efe404ae91c94d
|
from pyspark_proxy.proxy import Proxy
__all__ = ['Column']
class Column(Proxy):
def alias(self, *args, **kwargs):
return self._call(self._id, 'alias', (args, kwargs))
def cast(self, *args, **kwargs):
return self._call(self._id, 'cast', (args, kwargs))
def __repr__(self):
return self._call(self._id, '__repr__', ((), {}))
# better way to define these?
def _op_func(self, name, *args, **kwargs):
return self._call(self._id, '__neg__', (args, kwargs))
def __add__(self, *args, **kwargs):
return self._call(self._id, '__add__', (args, kwargs))
def __sub__(self, *args, **kwargs):
return self._call(self._id, '__sub__', (args, kwargs))
def __mul__(self, *args, **kwargs):
return self._call(self._id, '__mul__', (args, kwargs))
def __div__(self, *args, **kwargs):
return self._call(self._id, '__div__', (args, kwargs))
def __truediv__(self, *args, **kwargs):
return self._call(self._id, '__truediv__', (args, kwargs))
def __mod__(self, *args, **kwargs):
return self._call(self._id, '__mod__', (args, kwargs))
def __radd__(self, *args, **kwargs):
return self._call(self._id, '__radd__', (args, kwargs))
def __rsub__(self, *args, **kwargs):
return self._call(self._id, '__rsub__', (args, kwargs))
def __rmul__(self, *args, **kwargs):
return self._call(self._id, '__rmul__', (args, kwargs))
def __rdiv__(self, *args, **kwargs):
return self._call(self._id, '__rdiv__', (args, kwargs))
def __rtruediv__(self, *args, **kwargs):
return self._call(self._id, '__rdiv__', (args, kwargs))
def __rmod__(self, *args, **kwargs):
return self._call(self._id, '__rmod__', (args, kwargs))
def __pow__(self, *args, **kwargs):
return self._call(self._id, '__pow__', (args, kwargs))
def __rpow__(self, *args, **kwargs):
return self._call(self._id, '__rpow__', (args, kwargs))
def __eq__(self, *args, **kwargs):
return self._call(self._id, '__eq__', (args, kwargs))
def __ne__(self, *args, **kwargs):
return self._call(self._id, '__ne__', (args, kwargs))
def __lt__(self, *args, **kwargs):
return self._call(self._id, '__lt__', (args, kwargs))
def __le__(self, *args, **kwargs):
return self._call(self._id, '__le__', (args, kwargs))
def __ge__(self, *args, **kwargs):
return self._call(self._id, '__ge__', (args, kwargs))
def __gt__(self, *args, **kwargs):
return self._call(self._id, '__gt__', (args, kwargs))
def __and__(self, *args, **kwargs):
return self._call(self._id, '__and__', (args, kwargs))
def __or__(self, *args, **kwargs):
return self._call(self._id, '__or__', (args, kwargs))
def __invert__(self, *args, **kwargs):
return self._call(self._id, '__invert__', (args, kwargs))
def __rand__(self, *args, **kwargs):
return self._call(self._id, '__rand__', (args, kwargs))
def __ror__(self, *args, **kwargs):
return self._call(self._id, '__ror__', (args, kwargs))
|
[
"from pyspark_proxy.proxy import Proxy\n\n__all__ = ['Column']\n\nclass Column(Proxy):\n def alias(self, *args, **kwargs):\n return self._call(self._id, 'alias', (args, kwargs))\n\n def cast(self, *args, **kwargs):\n return self._call(self._id, 'cast', (args, kwargs))\n\n def __repr__(self):\n return self._call(self._id, '__repr__', ((), {}))\n\n # better way to define these?\n def _op_func(self, name, *args, **kwargs):\n return self._call(self._id, '__neg__', (args, kwargs))\n\n def __add__(self, *args, **kwargs):\n return self._call(self._id, '__add__', (args, kwargs))\n\n def __sub__(self, *args, **kwargs):\n return self._call(self._id, '__sub__', (args, kwargs))\n\n def __mul__(self, *args, **kwargs):\n return self._call(self._id, '__mul__', (args, kwargs))\n\n def __div__(self, *args, **kwargs):\n return self._call(self._id, '__div__', (args, kwargs))\n\n def __truediv__(self, *args, **kwargs):\n return self._call(self._id, '__truediv__', (args, kwargs))\n\n def __mod__(self, *args, **kwargs):\n return self._call(self._id, '__mod__', (args, kwargs))\n\n def __radd__(self, *args, **kwargs):\n return self._call(self._id, '__radd__', (args, kwargs))\n\n def __rsub__(self, *args, **kwargs):\n return self._call(self._id, '__rsub__', (args, kwargs))\n\n def __rmul__(self, *args, **kwargs):\n return self._call(self._id, '__rmul__', (args, kwargs))\n\n def __rdiv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rtruediv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rmod__(self, *args, **kwargs):\n return self._call(self._id, '__rmod__', (args, kwargs))\n\n def __pow__(self, *args, **kwargs):\n return self._call(self._id, '__pow__', (args, kwargs))\n\n def __rpow__(self, *args, **kwargs):\n return self._call(self._id, '__rpow__', (args, kwargs))\n\n def __eq__(self, *args, **kwargs):\n return self._call(self._id, '__eq__', (args, kwargs))\n\n def __ne__(self, *args, **kwargs):\n return self._call(self._id, '__ne__', (args, kwargs))\n\n def __lt__(self, *args, **kwargs):\n return self._call(self._id, '__lt__', (args, kwargs))\n\n def __le__(self, *args, **kwargs):\n return self._call(self._id, '__le__', (args, kwargs))\n\n def __ge__(self, *args, **kwargs):\n return self._call(self._id, '__ge__', (args, kwargs))\n\n def __gt__(self, *args, **kwargs):\n return self._call(self._id, '__gt__', (args, kwargs))\n\n def __and__(self, *args, **kwargs):\n return self._call(self._id, '__and__', (args, kwargs))\n\n def __or__(self, *args, **kwargs):\n return self._call(self._id, '__or__', (args, kwargs))\n\n def __invert__(self, *args, **kwargs):\n return self._call(self._id, '__invert__', (args, kwargs))\n\n def __rand__(self, *args, **kwargs):\n return self._call(self._id, '__rand__', (args, kwargs))\n\n def __ror__(self, *args, **kwargs):\n return self._call(self._id, '__ror__', (args, kwargs))\n",
"from pyspark_proxy.proxy import Proxy\n__all__ = ['Column']\n\n\nclass Column(Proxy):\n\n def alias(self, *args, **kwargs):\n return self._call(self._id, 'alias', (args, kwargs))\n\n def cast(self, *args, **kwargs):\n return self._call(self._id, 'cast', (args, kwargs))\n\n def __repr__(self):\n return self._call(self._id, '__repr__', ((), {}))\n\n def _op_func(self, name, *args, **kwargs):\n return self._call(self._id, '__neg__', (args, kwargs))\n\n def __add__(self, *args, **kwargs):\n return self._call(self._id, '__add__', (args, kwargs))\n\n def __sub__(self, *args, **kwargs):\n return self._call(self._id, '__sub__', (args, kwargs))\n\n def __mul__(self, *args, **kwargs):\n return self._call(self._id, '__mul__', (args, kwargs))\n\n def __div__(self, *args, **kwargs):\n return self._call(self._id, '__div__', (args, kwargs))\n\n def __truediv__(self, *args, **kwargs):\n return self._call(self._id, '__truediv__', (args, kwargs))\n\n def __mod__(self, *args, **kwargs):\n return self._call(self._id, '__mod__', (args, kwargs))\n\n def __radd__(self, *args, **kwargs):\n return self._call(self._id, '__radd__', (args, kwargs))\n\n def __rsub__(self, *args, **kwargs):\n return self._call(self._id, '__rsub__', (args, kwargs))\n\n def __rmul__(self, *args, **kwargs):\n return self._call(self._id, '__rmul__', (args, kwargs))\n\n def __rdiv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rtruediv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rmod__(self, *args, **kwargs):\n return self._call(self._id, '__rmod__', (args, kwargs))\n\n def __pow__(self, *args, **kwargs):\n return self._call(self._id, '__pow__', (args, kwargs))\n\n def __rpow__(self, *args, **kwargs):\n return self._call(self._id, '__rpow__', (args, kwargs))\n\n def __eq__(self, *args, **kwargs):\n return self._call(self._id, '__eq__', (args, kwargs))\n\n def __ne__(self, *args, **kwargs):\n return self._call(self._id, '__ne__', (args, kwargs))\n\n def __lt__(self, *args, **kwargs):\n return self._call(self._id, '__lt__', (args, kwargs))\n\n def __le__(self, *args, **kwargs):\n return self._call(self._id, '__le__', (args, kwargs))\n\n def __ge__(self, *args, **kwargs):\n return self._call(self._id, '__ge__', (args, kwargs))\n\n def __gt__(self, *args, **kwargs):\n return self._call(self._id, '__gt__', (args, kwargs))\n\n def __and__(self, *args, **kwargs):\n return self._call(self._id, '__and__', (args, kwargs))\n\n def __or__(self, *args, **kwargs):\n return self._call(self._id, '__or__', (args, kwargs))\n\n def __invert__(self, *args, **kwargs):\n return self._call(self._id, '__invert__', (args, kwargs))\n\n def __rand__(self, *args, **kwargs):\n return self._call(self._id, '__rand__', (args, kwargs))\n\n def __ror__(self, *args, **kwargs):\n return self._call(self._id, '__ror__', (args, kwargs))\n",
"<import token>\n__all__ = ['Column']\n\n\nclass Column(Proxy):\n\n def alias(self, *args, **kwargs):\n return self._call(self._id, 'alias', (args, kwargs))\n\n def cast(self, *args, **kwargs):\n return self._call(self._id, 'cast', (args, kwargs))\n\n def __repr__(self):\n return self._call(self._id, '__repr__', ((), {}))\n\n def _op_func(self, name, *args, **kwargs):\n return self._call(self._id, '__neg__', (args, kwargs))\n\n def __add__(self, *args, **kwargs):\n return self._call(self._id, '__add__', (args, kwargs))\n\n def __sub__(self, *args, **kwargs):\n return self._call(self._id, '__sub__', (args, kwargs))\n\n def __mul__(self, *args, **kwargs):\n return self._call(self._id, '__mul__', (args, kwargs))\n\n def __div__(self, *args, **kwargs):\n return self._call(self._id, '__div__', (args, kwargs))\n\n def __truediv__(self, *args, **kwargs):\n return self._call(self._id, '__truediv__', (args, kwargs))\n\n def __mod__(self, *args, **kwargs):\n return self._call(self._id, '__mod__', (args, kwargs))\n\n def __radd__(self, *args, **kwargs):\n return self._call(self._id, '__radd__', (args, kwargs))\n\n def __rsub__(self, *args, **kwargs):\n return self._call(self._id, '__rsub__', (args, kwargs))\n\n def __rmul__(self, *args, **kwargs):\n return self._call(self._id, '__rmul__', (args, kwargs))\n\n def __rdiv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rtruediv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rmod__(self, *args, **kwargs):\n return self._call(self._id, '__rmod__', (args, kwargs))\n\n def __pow__(self, *args, **kwargs):\n return self._call(self._id, '__pow__', (args, kwargs))\n\n def __rpow__(self, *args, **kwargs):\n return self._call(self._id, '__rpow__', (args, kwargs))\n\n def __eq__(self, *args, **kwargs):\n return self._call(self._id, '__eq__', (args, kwargs))\n\n def __ne__(self, *args, **kwargs):\n return self._call(self._id, '__ne__', (args, kwargs))\n\n def __lt__(self, *args, **kwargs):\n return self._call(self._id, '__lt__', (args, kwargs))\n\n def __le__(self, *args, **kwargs):\n return self._call(self._id, '__le__', (args, kwargs))\n\n def __ge__(self, *args, **kwargs):\n return self._call(self._id, '__ge__', (args, kwargs))\n\n def __gt__(self, *args, **kwargs):\n return self._call(self._id, '__gt__', (args, kwargs))\n\n def __and__(self, *args, **kwargs):\n return self._call(self._id, '__and__', (args, kwargs))\n\n def __or__(self, *args, **kwargs):\n return self._call(self._id, '__or__', (args, kwargs))\n\n def __invert__(self, *args, **kwargs):\n return self._call(self._id, '__invert__', (args, kwargs))\n\n def __rand__(self, *args, **kwargs):\n return self._call(self._id, '__rand__', (args, kwargs))\n\n def __ror__(self, *args, **kwargs):\n return self._call(self._id, '__ror__', (args, kwargs))\n",
"<import token>\n<assignment token>\n\n\nclass Column(Proxy):\n\n def alias(self, *args, **kwargs):\n return self._call(self._id, 'alias', (args, kwargs))\n\n def cast(self, *args, **kwargs):\n return self._call(self._id, 'cast', (args, kwargs))\n\n def __repr__(self):\n return self._call(self._id, '__repr__', ((), {}))\n\n def _op_func(self, name, *args, **kwargs):\n return self._call(self._id, '__neg__', (args, kwargs))\n\n def __add__(self, *args, **kwargs):\n return self._call(self._id, '__add__', (args, kwargs))\n\n def __sub__(self, *args, **kwargs):\n return self._call(self._id, '__sub__', (args, kwargs))\n\n def __mul__(self, *args, **kwargs):\n return self._call(self._id, '__mul__', (args, kwargs))\n\n def __div__(self, *args, **kwargs):\n return self._call(self._id, '__div__', (args, kwargs))\n\n def __truediv__(self, *args, **kwargs):\n return self._call(self._id, '__truediv__', (args, kwargs))\n\n def __mod__(self, *args, **kwargs):\n return self._call(self._id, '__mod__', (args, kwargs))\n\n def __radd__(self, *args, **kwargs):\n return self._call(self._id, '__radd__', (args, kwargs))\n\n def __rsub__(self, *args, **kwargs):\n return self._call(self._id, '__rsub__', (args, kwargs))\n\n def __rmul__(self, *args, **kwargs):\n return self._call(self._id, '__rmul__', (args, kwargs))\n\n def __rdiv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rtruediv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rmod__(self, *args, **kwargs):\n return self._call(self._id, '__rmod__', (args, kwargs))\n\n def __pow__(self, *args, **kwargs):\n return self._call(self._id, '__pow__', (args, kwargs))\n\n def __rpow__(self, *args, **kwargs):\n return self._call(self._id, '__rpow__', (args, kwargs))\n\n def __eq__(self, *args, **kwargs):\n return self._call(self._id, '__eq__', (args, kwargs))\n\n def __ne__(self, *args, **kwargs):\n return self._call(self._id, '__ne__', (args, kwargs))\n\n def __lt__(self, *args, **kwargs):\n return self._call(self._id, '__lt__', (args, kwargs))\n\n def __le__(self, *args, **kwargs):\n return self._call(self._id, '__le__', (args, kwargs))\n\n def __ge__(self, *args, **kwargs):\n return self._call(self._id, '__ge__', (args, kwargs))\n\n def __gt__(self, *args, **kwargs):\n return self._call(self._id, '__gt__', (args, kwargs))\n\n def __and__(self, *args, **kwargs):\n return self._call(self._id, '__and__', (args, kwargs))\n\n def __or__(self, *args, **kwargs):\n return self._call(self._id, '__or__', (args, kwargs))\n\n def __invert__(self, *args, **kwargs):\n return self._call(self._id, '__invert__', (args, kwargs))\n\n def __rand__(self, *args, **kwargs):\n return self._call(self._id, '__rand__', (args, kwargs))\n\n def __ror__(self, *args, **kwargs):\n return self._call(self._id, '__ror__', (args, kwargs))\n",
"<import token>\n<assignment token>\n\n\nclass Column(Proxy):\n\n def alias(self, *args, **kwargs):\n return self._call(self._id, 'alias', (args, kwargs))\n\n def cast(self, *args, **kwargs):\n return self._call(self._id, 'cast', (args, kwargs))\n\n def __repr__(self):\n return self._call(self._id, '__repr__', ((), {}))\n\n def _op_func(self, name, *args, **kwargs):\n return self._call(self._id, '__neg__', (args, kwargs))\n\n def __add__(self, *args, **kwargs):\n return self._call(self._id, '__add__', (args, kwargs))\n\n def __sub__(self, *args, **kwargs):\n return self._call(self._id, '__sub__', (args, kwargs))\n\n def __mul__(self, *args, **kwargs):\n return self._call(self._id, '__mul__', (args, kwargs))\n\n def __div__(self, *args, **kwargs):\n return self._call(self._id, '__div__', (args, kwargs))\n\n def __truediv__(self, *args, **kwargs):\n return self._call(self._id, '__truediv__', (args, kwargs))\n\n def __mod__(self, *args, **kwargs):\n return self._call(self._id, '__mod__', (args, kwargs))\n\n def __radd__(self, *args, **kwargs):\n return self._call(self._id, '__radd__', (args, kwargs))\n\n def __rsub__(self, *args, **kwargs):\n return self._call(self._id, '__rsub__', (args, kwargs))\n\n def __rmul__(self, *args, **kwargs):\n return self._call(self._id, '__rmul__', (args, kwargs))\n\n def __rdiv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rtruediv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rmod__(self, *args, **kwargs):\n return self._call(self._id, '__rmod__', (args, kwargs))\n\n def __pow__(self, *args, **kwargs):\n return self._call(self._id, '__pow__', (args, kwargs))\n\n def __rpow__(self, *args, **kwargs):\n return self._call(self._id, '__rpow__', (args, kwargs))\n\n def __eq__(self, *args, **kwargs):\n return self._call(self._id, '__eq__', (args, kwargs))\n\n def __ne__(self, *args, **kwargs):\n return self._call(self._id, '__ne__', (args, kwargs))\n <function token>\n\n def __le__(self, *args, **kwargs):\n return self._call(self._id, '__le__', (args, kwargs))\n\n def __ge__(self, *args, **kwargs):\n return self._call(self._id, '__ge__', (args, kwargs))\n\n def __gt__(self, *args, **kwargs):\n return self._call(self._id, '__gt__', (args, kwargs))\n\n def __and__(self, *args, **kwargs):\n return self._call(self._id, '__and__', (args, kwargs))\n\n def __or__(self, *args, **kwargs):\n return self._call(self._id, '__or__', (args, kwargs))\n\n def __invert__(self, *args, **kwargs):\n return self._call(self._id, '__invert__', (args, kwargs))\n\n def __rand__(self, *args, **kwargs):\n return self._call(self._id, '__rand__', (args, kwargs))\n\n def __ror__(self, *args, **kwargs):\n return self._call(self._id, '__ror__', (args, kwargs))\n",
"<import token>\n<assignment token>\n\n\nclass Column(Proxy):\n\n def alias(self, *args, **kwargs):\n return self._call(self._id, 'alias', (args, kwargs))\n\n def cast(self, *args, **kwargs):\n return self._call(self._id, 'cast', (args, kwargs))\n\n def __repr__(self):\n return self._call(self._id, '__repr__', ((), {}))\n <function token>\n\n def __add__(self, *args, **kwargs):\n return self._call(self._id, '__add__', (args, kwargs))\n\n def __sub__(self, *args, **kwargs):\n return self._call(self._id, '__sub__', (args, kwargs))\n\n def __mul__(self, *args, **kwargs):\n return self._call(self._id, '__mul__', (args, kwargs))\n\n def __div__(self, *args, **kwargs):\n return self._call(self._id, '__div__', (args, kwargs))\n\n def __truediv__(self, *args, **kwargs):\n return self._call(self._id, '__truediv__', (args, kwargs))\n\n def __mod__(self, *args, **kwargs):\n return self._call(self._id, '__mod__', (args, kwargs))\n\n def __radd__(self, *args, **kwargs):\n return self._call(self._id, '__radd__', (args, kwargs))\n\n def __rsub__(self, *args, **kwargs):\n return self._call(self._id, '__rsub__', (args, kwargs))\n\n def __rmul__(self, *args, **kwargs):\n return self._call(self._id, '__rmul__', (args, kwargs))\n\n def __rdiv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rtruediv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rmod__(self, *args, **kwargs):\n return self._call(self._id, '__rmod__', (args, kwargs))\n\n def __pow__(self, *args, **kwargs):\n return self._call(self._id, '__pow__', (args, kwargs))\n\n def __rpow__(self, *args, **kwargs):\n return self._call(self._id, '__rpow__', (args, kwargs))\n\n def __eq__(self, *args, **kwargs):\n return self._call(self._id, '__eq__', (args, kwargs))\n\n def __ne__(self, *args, **kwargs):\n return self._call(self._id, '__ne__', (args, kwargs))\n <function token>\n\n def __le__(self, *args, **kwargs):\n return self._call(self._id, '__le__', (args, kwargs))\n\n def __ge__(self, *args, **kwargs):\n return self._call(self._id, '__ge__', (args, kwargs))\n\n def __gt__(self, *args, **kwargs):\n return self._call(self._id, '__gt__', (args, kwargs))\n\n def __and__(self, *args, **kwargs):\n return self._call(self._id, '__and__', (args, kwargs))\n\n def __or__(self, *args, **kwargs):\n return self._call(self._id, '__or__', (args, kwargs))\n\n def __invert__(self, *args, **kwargs):\n return self._call(self._id, '__invert__', (args, kwargs))\n\n def __rand__(self, *args, **kwargs):\n return self._call(self._id, '__rand__', (args, kwargs))\n\n def __ror__(self, *args, **kwargs):\n return self._call(self._id, '__ror__', (args, kwargs))\n",
"<import token>\n<assignment token>\n\n\nclass Column(Proxy):\n <function token>\n\n def cast(self, *args, **kwargs):\n return self._call(self._id, 'cast', (args, kwargs))\n\n def __repr__(self):\n return self._call(self._id, '__repr__', ((), {}))\n <function token>\n\n def __add__(self, *args, **kwargs):\n return self._call(self._id, '__add__', (args, kwargs))\n\n def __sub__(self, *args, **kwargs):\n return self._call(self._id, '__sub__', (args, kwargs))\n\n def __mul__(self, *args, **kwargs):\n return self._call(self._id, '__mul__', (args, kwargs))\n\n def __div__(self, *args, **kwargs):\n return self._call(self._id, '__div__', (args, kwargs))\n\n def __truediv__(self, *args, **kwargs):\n return self._call(self._id, '__truediv__', (args, kwargs))\n\n def __mod__(self, *args, **kwargs):\n return self._call(self._id, '__mod__', (args, kwargs))\n\n def __radd__(self, *args, **kwargs):\n return self._call(self._id, '__radd__', (args, kwargs))\n\n def __rsub__(self, *args, **kwargs):\n return self._call(self._id, '__rsub__', (args, kwargs))\n\n def __rmul__(self, *args, **kwargs):\n return self._call(self._id, '__rmul__', (args, kwargs))\n\n def __rdiv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rtruediv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rmod__(self, *args, **kwargs):\n return self._call(self._id, '__rmod__', (args, kwargs))\n\n def __pow__(self, *args, **kwargs):\n return self._call(self._id, '__pow__', (args, kwargs))\n\n def __rpow__(self, *args, **kwargs):\n return self._call(self._id, '__rpow__', (args, kwargs))\n\n def __eq__(self, *args, **kwargs):\n return self._call(self._id, '__eq__', (args, kwargs))\n\n def __ne__(self, *args, **kwargs):\n return self._call(self._id, '__ne__', (args, kwargs))\n <function token>\n\n def __le__(self, *args, **kwargs):\n return self._call(self._id, '__le__', (args, kwargs))\n\n def __ge__(self, *args, **kwargs):\n return self._call(self._id, '__ge__', (args, kwargs))\n\n def __gt__(self, *args, **kwargs):\n return self._call(self._id, '__gt__', (args, kwargs))\n\n def __and__(self, *args, **kwargs):\n return self._call(self._id, '__and__', (args, kwargs))\n\n def __or__(self, *args, **kwargs):\n return self._call(self._id, '__or__', (args, kwargs))\n\n def __invert__(self, *args, **kwargs):\n return self._call(self._id, '__invert__', (args, kwargs))\n\n def __rand__(self, *args, **kwargs):\n return self._call(self._id, '__rand__', (args, kwargs))\n\n def __ror__(self, *args, **kwargs):\n return self._call(self._id, '__ror__', (args, kwargs))\n",
"<import token>\n<assignment token>\n\n\nclass Column(Proxy):\n <function token>\n\n def cast(self, *args, **kwargs):\n return self._call(self._id, 'cast', (args, kwargs))\n\n def __repr__(self):\n return self._call(self._id, '__repr__', ((), {}))\n <function token>\n\n def __add__(self, *args, **kwargs):\n return self._call(self._id, '__add__', (args, kwargs))\n\n def __sub__(self, *args, **kwargs):\n return self._call(self._id, '__sub__', (args, kwargs))\n\n def __mul__(self, *args, **kwargs):\n return self._call(self._id, '__mul__', (args, kwargs))\n <function token>\n\n def __truediv__(self, *args, **kwargs):\n return self._call(self._id, '__truediv__', (args, kwargs))\n\n def __mod__(self, *args, **kwargs):\n return self._call(self._id, '__mod__', (args, kwargs))\n\n def __radd__(self, *args, **kwargs):\n return self._call(self._id, '__radd__', (args, kwargs))\n\n def __rsub__(self, *args, **kwargs):\n return self._call(self._id, '__rsub__', (args, kwargs))\n\n def __rmul__(self, *args, **kwargs):\n return self._call(self._id, '__rmul__', (args, kwargs))\n\n def __rdiv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rtruediv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rmod__(self, *args, **kwargs):\n return self._call(self._id, '__rmod__', (args, kwargs))\n\n def __pow__(self, *args, **kwargs):\n return self._call(self._id, '__pow__', (args, kwargs))\n\n def __rpow__(self, *args, **kwargs):\n return self._call(self._id, '__rpow__', (args, kwargs))\n\n def __eq__(self, *args, **kwargs):\n return self._call(self._id, '__eq__', (args, kwargs))\n\n def __ne__(self, *args, **kwargs):\n return self._call(self._id, '__ne__', (args, kwargs))\n <function token>\n\n def __le__(self, *args, **kwargs):\n return self._call(self._id, '__le__', (args, kwargs))\n\n def __ge__(self, *args, **kwargs):\n return self._call(self._id, '__ge__', (args, kwargs))\n\n def __gt__(self, *args, **kwargs):\n return self._call(self._id, '__gt__', (args, kwargs))\n\n def __and__(self, *args, **kwargs):\n return self._call(self._id, '__and__', (args, kwargs))\n\n def __or__(self, *args, **kwargs):\n return self._call(self._id, '__or__', (args, kwargs))\n\n def __invert__(self, *args, **kwargs):\n return self._call(self._id, '__invert__', (args, kwargs))\n\n def __rand__(self, *args, **kwargs):\n return self._call(self._id, '__rand__', (args, kwargs))\n\n def __ror__(self, *args, **kwargs):\n return self._call(self._id, '__ror__', (args, kwargs))\n",
"<import token>\n<assignment token>\n\n\nclass Column(Proxy):\n <function token>\n\n def cast(self, *args, **kwargs):\n return self._call(self._id, 'cast', (args, kwargs))\n\n def __repr__(self):\n return self._call(self._id, '__repr__', ((), {}))\n <function token>\n\n def __add__(self, *args, **kwargs):\n return self._call(self._id, '__add__', (args, kwargs))\n\n def __sub__(self, *args, **kwargs):\n return self._call(self._id, '__sub__', (args, kwargs))\n\n def __mul__(self, *args, **kwargs):\n return self._call(self._id, '__mul__', (args, kwargs))\n <function token>\n\n def __truediv__(self, *args, **kwargs):\n return self._call(self._id, '__truediv__', (args, kwargs))\n\n def __mod__(self, *args, **kwargs):\n return self._call(self._id, '__mod__', (args, kwargs))\n\n def __radd__(self, *args, **kwargs):\n return self._call(self._id, '__radd__', (args, kwargs))\n\n def __rsub__(self, *args, **kwargs):\n return self._call(self._id, '__rsub__', (args, kwargs))\n\n def __rmul__(self, *args, **kwargs):\n return self._call(self._id, '__rmul__', (args, kwargs))\n\n def __rdiv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rtruediv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rmod__(self, *args, **kwargs):\n return self._call(self._id, '__rmod__', (args, kwargs))\n\n def __pow__(self, *args, **kwargs):\n return self._call(self._id, '__pow__', (args, kwargs))\n\n def __rpow__(self, *args, **kwargs):\n return self._call(self._id, '__rpow__', (args, kwargs))\n\n def __eq__(self, *args, **kwargs):\n return self._call(self._id, '__eq__', (args, kwargs))\n\n def __ne__(self, *args, **kwargs):\n return self._call(self._id, '__ne__', (args, kwargs))\n <function token>\n\n def __le__(self, *args, **kwargs):\n return self._call(self._id, '__le__', (args, kwargs))\n\n def __ge__(self, *args, **kwargs):\n return self._call(self._id, '__ge__', (args, kwargs))\n\n def __gt__(self, *args, **kwargs):\n return self._call(self._id, '__gt__', (args, kwargs))\n\n def __and__(self, *args, **kwargs):\n return self._call(self._id, '__and__', (args, kwargs))\n\n def __or__(self, *args, **kwargs):\n return self._call(self._id, '__or__', (args, kwargs))\n\n def __invert__(self, *args, **kwargs):\n return self._call(self._id, '__invert__', (args, kwargs))\n\n def __rand__(self, *args, **kwargs):\n return self._call(self._id, '__rand__', (args, kwargs))\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass Column(Proxy):\n <function token>\n\n def cast(self, *args, **kwargs):\n return self._call(self._id, 'cast', (args, kwargs))\n\n def __repr__(self):\n return self._call(self._id, '__repr__', ((), {}))\n <function token>\n <function token>\n\n def __sub__(self, *args, **kwargs):\n return self._call(self._id, '__sub__', (args, kwargs))\n\n def __mul__(self, *args, **kwargs):\n return self._call(self._id, '__mul__', (args, kwargs))\n <function token>\n\n def __truediv__(self, *args, **kwargs):\n return self._call(self._id, '__truediv__', (args, kwargs))\n\n def __mod__(self, *args, **kwargs):\n return self._call(self._id, '__mod__', (args, kwargs))\n\n def __radd__(self, *args, **kwargs):\n return self._call(self._id, '__radd__', (args, kwargs))\n\n def __rsub__(self, *args, **kwargs):\n return self._call(self._id, '__rsub__', (args, kwargs))\n\n def __rmul__(self, *args, **kwargs):\n return self._call(self._id, '__rmul__', (args, kwargs))\n\n def __rdiv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rtruediv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rmod__(self, *args, **kwargs):\n return self._call(self._id, '__rmod__', (args, kwargs))\n\n def __pow__(self, *args, **kwargs):\n return self._call(self._id, '__pow__', (args, kwargs))\n\n def __rpow__(self, *args, **kwargs):\n return self._call(self._id, '__rpow__', (args, kwargs))\n\n def __eq__(self, *args, **kwargs):\n return self._call(self._id, '__eq__', (args, kwargs))\n\n def __ne__(self, *args, **kwargs):\n return self._call(self._id, '__ne__', (args, kwargs))\n <function token>\n\n def __le__(self, *args, **kwargs):\n return self._call(self._id, '__le__', (args, kwargs))\n\n def __ge__(self, *args, **kwargs):\n return self._call(self._id, '__ge__', (args, kwargs))\n\n def __gt__(self, *args, **kwargs):\n return self._call(self._id, '__gt__', (args, kwargs))\n\n def __and__(self, *args, **kwargs):\n return self._call(self._id, '__and__', (args, kwargs))\n\n def __or__(self, *args, **kwargs):\n return self._call(self._id, '__or__', (args, kwargs))\n\n def __invert__(self, *args, **kwargs):\n return self._call(self._id, '__invert__', (args, kwargs))\n\n def __rand__(self, *args, **kwargs):\n return self._call(self._id, '__rand__', (args, kwargs))\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass Column(Proxy):\n <function token>\n\n def cast(self, *args, **kwargs):\n return self._call(self._id, 'cast', (args, kwargs))\n\n def __repr__(self):\n return self._call(self._id, '__repr__', ((), {}))\n <function token>\n <function token>\n\n def __sub__(self, *args, **kwargs):\n return self._call(self._id, '__sub__', (args, kwargs))\n\n def __mul__(self, *args, **kwargs):\n return self._call(self._id, '__mul__', (args, kwargs))\n <function token>\n\n def __truediv__(self, *args, **kwargs):\n return self._call(self._id, '__truediv__', (args, kwargs))\n\n def __mod__(self, *args, **kwargs):\n return self._call(self._id, '__mod__', (args, kwargs))\n\n def __radd__(self, *args, **kwargs):\n return self._call(self._id, '__radd__', (args, kwargs))\n\n def __rsub__(self, *args, **kwargs):\n return self._call(self._id, '__rsub__', (args, kwargs))\n\n def __rmul__(self, *args, **kwargs):\n return self._call(self._id, '__rmul__', (args, kwargs))\n\n def __rdiv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rtruediv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rmod__(self, *args, **kwargs):\n return self._call(self._id, '__rmod__', (args, kwargs))\n\n def __pow__(self, *args, **kwargs):\n return self._call(self._id, '__pow__', (args, kwargs))\n <function token>\n\n def __eq__(self, *args, **kwargs):\n return self._call(self._id, '__eq__', (args, kwargs))\n\n def __ne__(self, *args, **kwargs):\n return self._call(self._id, '__ne__', (args, kwargs))\n <function token>\n\n def __le__(self, *args, **kwargs):\n return self._call(self._id, '__le__', (args, kwargs))\n\n def __ge__(self, *args, **kwargs):\n return self._call(self._id, '__ge__', (args, kwargs))\n\n def __gt__(self, *args, **kwargs):\n return self._call(self._id, '__gt__', (args, kwargs))\n\n def __and__(self, *args, **kwargs):\n return self._call(self._id, '__and__', (args, kwargs))\n\n def __or__(self, *args, **kwargs):\n return self._call(self._id, '__or__', (args, kwargs))\n\n def __invert__(self, *args, **kwargs):\n return self._call(self._id, '__invert__', (args, kwargs))\n\n def __rand__(self, *args, **kwargs):\n return self._call(self._id, '__rand__', (args, kwargs))\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass Column(Proxy):\n <function token>\n\n def cast(self, *args, **kwargs):\n return self._call(self._id, 'cast', (args, kwargs))\n\n def __repr__(self):\n return self._call(self._id, '__repr__', ((), {}))\n <function token>\n <function token>\n\n def __sub__(self, *args, **kwargs):\n return self._call(self._id, '__sub__', (args, kwargs))\n <function token>\n <function token>\n\n def __truediv__(self, *args, **kwargs):\n return self._call(self._id, '__truediv__', (args, kwargs))\n\n def __mod__(self, *args, **kwargs):\n return self._call(self._id, '__mod__', (args, kwargs))\n\n def __radd__(self, *args, **kwargs):\n return self._call(self._id, '__radd__', (args, kwargs))\n\n def __rsub__(self, *args, **kwargs):\n return self._call(self._id, '__rsub__', (args, kwargs))\n\n def __rmul__(self, *args, **kwargs):\n return self._call(self._id, '__rmul__', (args, kwargs))\n\n def __rdiv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rtruediv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rmod__(self, *args, **kwargs):\n return self._call(self._id, '__rmod__', (args, kwargs))\n\n def __pow__(self, *args, **kwargs):\n return self._call(self._id, '__pow__', (args, kwargs))\n <function token>\n\n def __eq__(self, *args, **kwargs):\n return self._call(self._id, '__eq__', (args, kwargs))\n\n def __ne__(self, *args, **kwargs):\n return self._call(self._id, '__ne__', (args, kwargs))\n <function token>\n\n def __le__(self, *args, **kwargs):\n return self._call(self._id, '__le__', (args, kwargs))\n\n def __ge__(self, *args, **kwargs):\n return self._call(self._id, '__ge__', (args, kwargs))\n\n def __gt__(self, *args, **kwargs):\n return self._call(self._id, '__gt__', (args, kwargs))\n\n def __and__(self, *args, **kwargs):\n return self._call(self._id, '__and__', (args, kwargs))\n\n def __or__(self, *args, **kwargs):\n return self._call(self._id, '__or__', (args, kwargs))\n\n def __invert__(self, *args, **kwargs):\n return self._call(self._id, '__invert__', (args, kwargs))\n\n def __rand__(self, *args, **kwargs):\n return self._call(self._id, '__rand__', (args, kwargs))\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass Column(Proxy):\n <function token>\n <function token>\n\n def __repr__(self):\n return self._call(self._id, '__repr__', ((), {}))\n <function token>\n <function token>\n\n def __sub__(self, *args, **kwargs):\n return self._call(self._id, '__sub__', (args, kwargs))\n <function token>\n <function token>\n\n def __truediv__(self, *args, **kwargs):\n return self._call(self._id, '__truediv__', (args, kwargs))\n\n def __mod__(self, *args, **kwargs):\n return self._call(self._id, '__mod__', (args, kwargs))\n\n def __radd__(self, *args, **kwargs):\n return self._call(self._id, '__radd__', (args, kwargs))\n\n def __rsub__(self, *args, **kwargs):\n return self._call(self._id, '__rsub__', (args, kwargs))\n\n def __rmul__(self, *args, **kwargs):\n return self._call(self._id, '__rmul__', (args, kwargs))\n\n def __rdiv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rtruediv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rmod__(self, *args, **kwargs):\n return self._call(self._id, '__rmod__', (args, kwargs))\n\n def __pow__(self, *args, **kwargs):\n return self._call(self._id, '__pow__', (args, kwargs))\n <function token>\n\n def __eq__(self, *args, **kwargs):\n return self._call(self._id, '__eq__', (args, kwargs))\n\n def __ne__(self, *args, **kwargs):\n return self._call(self._id, '__ne__', (args, kwargs))\n <function token>\n\n def __le__(self, *args, **kwargs):\n return self._call(self._id, '__le__', (args, kwargs))\n\n def __ge__(self, *args, **kwargs):\n return self._call(self._id, '__ge__', (args, kwargs))\n\n def __gt__(self, *args, **kwargs):\n return self._call(self._id, '__gt__', (args, kwargs))\n\n def __and__(self, *args, **kwargs):\n return self._call(self._id, '__and__', (args, kwargs))\n\n def __or__(self, *args, **kwargs):\n return self._call(self._id, '__or__', (args, kwargs))\n\n def __invert__(self, *args, **kwargs):\n return self._call(self._id, '__invert__', (args, kwargs))\n\n def __rand__(self, *args, **kwargs):\n return self._call(self._id, '__rand__', (args, kwargs))\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass Column(Proxy):\n <function token>\n <function token>\n\n def __repr__(self):\n return self._call(self._id, '__repr__', ((), {}))\n <function token>\n <function token>\n\n def __sub__(self, *args, **kwargs):\n return self._call(self._id, '__sub__', (args, kwargs))\n <function token>\n <function token>\n <function token>\n\n def __mod__(self, *args, **kwargs):\n return self._call(self._id, '__mod__', (args, kwargs))\n\n def __radd__(self, *args, **kwargs):\n return self._call(self._id, '__radd__', (args, kwargs))\n\n def __rsub__(self, *args, **kwargs):\n return self._call(self._id, '__rsub__', (args, kwargs))\n\n def __rmul__(self, *args, **kwargs):\n return self._call(self._id, '__rmul__', (args, kwargs))\n\n def __rdiv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rtruediv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rmod__(self, *args, **kwargs):\n return self._call(self._id, '__rmod__', (args, kwargs))\n\n def __pow__(self, *args, **kwargs):\n return self._call(self._id, '__pow__', (args, kwargs))\n <function token>\n\n def __eq__(self, *args, **kwargs):\n return self._call(self._id, '__eq__', (args, kwargs))\n\n def __ne__(self, *args, **kwargs):\n return self._call(self._id, '__ne__', (args, kwargs))\n <function token>\n\n def __le__(self, *args, **kwargs):\n return self._call(self._id, '__le__', (args, kwargs))\n\n def __ge__(self, *args, **kwargs):\n return self._call(self._id, '__ge__', (args, kwargs))\n\n def __gt__(self, *args, **kwargs):\n return self._call(self._id, '__gt__', (args, kwargs))\n\n def __and__(self, *args, **kwargs):\n return self._call(self._id, '__and__', (args, kwargs))\n\n def __or__(self, *args, **kwargs):\n return self._call(self._id, '__or__', (args, kwargs))\n\n def __invert__(self, *args, **kwargs):\n return self._call(self._id, '__invert__', (args, kwargs))\n\n def __rand__(self, *args, **kwargs):\n return self._call(self._id, '__rand__', (args, kwargs))\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass Column(Proxy):\n <function token>\n <function token>\n\n def __repr__(self):\n return self._call(self._id, '__repr__', ((), {}))\n <function token>\n <function token>\n\n def __sub__(self, *args, **kwargs):\n return self._call(self._id, '__sub__', (args, kwargs))\n <function token>\n <function token>\n <function token>\n\n def __mod__(self, *args, **kwargs):\n return self._call(self._id, '__mod__', (args, kwargs))\n\n def __radd__(self, *args, **kwargs):\n return self._call(self._id, '__radd__', (args, kwargs))\n\n def __rsub__(self, *args, **kwargs):\n return self._call(self._id, '__rsub__', (args, kwargs))\n\n def __rmul__(self, *args, **kwargs):\n return self._call(self._id, '__rmul__', (args, kwargs))\n\n def __rdiv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rtruediv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rmod__(self, *args, **kwargs):\n return self._call(self._id, '__rmod__', (args, kwargs))\n\n def __pow__(self, *args, **kwargs):\n return self._call(self._id, '__pow__', (args, kwargs))\n <function token>\n\n def __eq__(self, *args, **kwargs):\n return self._call(self._id, '__eq__', (args, kwargs))\n <function token>\n <function token>\n\n def __le__(self, *args, **kwargs):\n return self._call(self._id, '__le__', (args, kwargs))\n\n def __ge__(self, *args, **kwargs):\n return self._call(self._id, '__ge__', (args, kwargs))\n\n def __gt__(self, *args, **kwargs):\n return self._call(self._id, '__gt__', (args, kwargs))\n\n def __and__(self, *args, **kwargs):\n return self._call(self._id, '__and__', (args, kwargs))\n\n def __or__(self, *args, **kwargs):\n return self._call(self._id, '__or__', (args, kwargs))\n\n def __invert__(self, *args, **kwargs):\n return self._call(self._id, '__invert__', (args, kwargs))\n\n def __rand__(self, *args, **kwargs):\n return self._call(self._id, '__rand__', (args, kwargs))\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass Column(Proxy):\n <function token>\n <function token>\n\n def __repr__(self):\n return self._call(self._id, '__repr__', ((), {}))\n <function token>\n <function token>\n\n def __sub__(self, *args, **kwargs):\n return self._call(self._id, '__sub__', (args, kwargs))\n <function token>\n <function token>\n <function token>\n\n def __mod__(self, *args, **kwargs):\n return self._call(self._id, '__mod__', (args, kwargs))\n\n def __radd__(self, *args, **kwargs):\n return self._call(self._id, '__radd__', (args, kwargs))\n\n def __rsub__(self, *args, **kwargs):\n return self._call(self._id, '__rsub__', (args, kwargs))\n\n def __rmul__(self, *args, **kwargs):\n return self._call(self._id, '__rmul__', (args, kwargs))\n\n def __rdiv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rtruediv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rmod__(self, *args, **kwargs):\n return self._call(self._id, '__rmod__', (args, kwargs))\n\n def __pow__(self, *args, **kwargs):\n return self._call(self._id, '__pow__', (args, kwargs))\n <function token>\n\n def __eq__(self, *args, **kwargs):\n return self._call(self._id, '__eq__', (args, kwargs))\n <function token>\n <function token>\n\n def __le__(self, *args, **kwargs):\n return self._call(self._id, '__le__', (args, kwargs))\n\n def __ge__(self, *args, **kwargs):\n return self._call(self._id, '__ge__', (args, kwargs))\n <function token>\n\n def __and__(self, *args, **kwargs):\n return self._call(self._id, '__and__', (args, kwargs))\n\n def __or__(self, *args, **kwargs):\n return self._call(self._id, '__or__', (args, kwargs))\n\n def __invert__(self, *args, **kwargs):\n return self._call(self._id, '__invert__', (args, kwargs))\n\n def __rand__(self, *args, **kwargs):\n return self._call(self._id, '__rand__', (args, kwargs))\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass Column(Proxy):\n <function token>\n <function token>\n\n def __repr__(self):\n return self._call(self._id, '__repr__', ((), {}))\n <function token>\n <function token>\n\n def __sub__(self, *args, **kwargs):\n return self._call(self._id, '__sub__', (args, kwargs))\n <function token>\n <function token>\n <function token>\n\n def __mod__(self, *args, **kwargs):\n return self._call(self._id, '__mod__', (args, kwargs))\n\n def __radd__(self, *args, **kwargs):\n return self._call(self._id, '__radd__', (args, kwargs))\n <function token>\n\n def __rmul__(self, *args, **kwargs):\n return self._call(self._id, '__rmul__', (args, kwargs))\n\n def __rdiv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rtruediv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rmod__(self, *args, **kwargs):\n return self._call(self._id, '__rmod__', (args, kwargs))\n\n def __pow__(self, *args, **kwargs):\n return self._call(self._id, '__pow__', (args, kwargs))\n <function token>\n\n def __eq__(self, *args, **kwargs):\n return self._call(self._id, '__eq__', (args, kwargs))\n <function token>\n <function token>\n\n def __le__(self, *args, **kwargs):\n return self._call(self._id, '__le__', (args, kwargs))\n\n def __ge__(self, *args, **kwargs):\n return self._call(self._id, '__ge__', (args, kwargs))\n <function token>\n\n def __and__(self, *args, **kwargs):\n return self._call(self._id, '__and__', (args, kwargs))\n\n def __or__(self, *args, **kwargs):\n return self._call(self._id, '__or__', (args, kwargs))\n\n def __invert__(self, *args, **kwargs):\n return self._call(self._id, '__invert__', (args, kwargs))\n\n def __rand__(self, *args, **kwargs):\n return self._call(self._id, '__rand__', (args, kwargs))\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass Column(Proxy):\n <function token>\n <function token>\n\n def __repr__(self):\n return self._call(self._id, '__repr__', ((), {}))\n <function token>\n <function token>\n\n def __sub__(self, *args, **kwargs):\n return self._call(self._id, '__sub__', (args, kwargs))\n <function token>\n <function token>\n <function token>\n\n def __mod__(self, *args, **kwargs):\n return self._call(self._id, '__mod__', (args, kwargs))\n\n def __radd__(self, *args, **kwargs):\n return self._call(self._id, '__radd__', (args, kwargs))\n <function token>\n <function token>\n\n def __rdiv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rtruediv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rmod__(self, *args, **kwargs):\n return self._call(self._id, '__rmod__', (args, kwargs))\n\n def __pow__(self, *args, **kwargs):\n return self._call(self._id, '__pow__', (args, kwargs))\n <function token>\n\n def __eq__(self, *args, **kwargs):\n return self._call(self._id, '__eq__', (args, kwargs))\n <function token>\n <function token>\n\n def __le__(self, *args, **kwargs):\n return self._call(self._id, '__le__', (args, kwargs))\n\n def __ge__(self, *args, **kwargs):\n return self._call(self._id, '__ge__', (args, kwargs))\n <function token>\n\n def __and__(self, *args, **kwargs):\n return self._call(self._id, '__and__', (args, kwargs))\n\n def __or__(self, *args, **kwargs):\n return self._call(self._id, '__or__', (args, kwargs))\n\n def __invert__(self, *args, **kwargs):\n return self._call(self._id, '__invert__', (args, kwargs))\n\n def __rand__(self, *args, **kwargs):\n return self._call(self._id, '__rand__', (args, kwargs))\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass Column(Proxy):\n <function token>\n <function token>\n\n def __repr__(self):\n return self._call(self._id, '__repr__', ((), {}))\n <function token>\n <function token>\n\n def __sub__(self, *args, **kwargs):\n return self._call(self._id, '__sub__', (args, kwargs))\n <function token>\n <function token>\n <function token>\n\n def __mod__(self, *args, **kwargs):\n return self._call(self._id, '__mod__', (args, kwargs))\n <function token>\n <function token>\n <function token>\n\n def __rdiv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rtruediv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rmod__(self, *args, **kwargs):\n return self._call(self._id, '__rmod__', (args, kwargs))\n\n def __pow__(self, *args, **kwargs):\n return self._call(self._id, '__pow__', (args, kwargs))\n <function token>\n\n def __eq__(self, *args, **kwargs):\n return self._call(self._id, '__eq__', (args, kwargs))\n <function token>\n <function token>\n\n def __le__(self, *args, **kwargs):\n return self._call(self._id, '__le__', (args, kwargs))\n\n def __ge__(self, *args, **kwargs):\n return self._call(self._id, '__ge__', (args, kwargs))\n <function token>\n\n def __and__(self, *args, **kwargs):\n return self._call(self._id, '__and__', (args, kwargs))\n\n def __or__(self, *args, **kwargs):\n return self._call(self._id, '__or__', (args, kwargs))\n\n def __invert__(self, *args, **kwargs):\n return self._call(self._id, '__invert__', (args, kwargs))\n\n def __rand__(self, *args, **kwargs):\n return self._call(self._id, '__rand__', (args, kwargs))\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass Column(Proxy):\n <function token>\n <function token>\n\n def __repr__(self):\n return self._call(self._id, '__repr__', ((), {}))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __mod__(self, *args, **kwargs):\n return self._call(self._id, '__mod__', (args, kwargs))\n <function token>\n <function token>\n <function token>\n\n def __rdiv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rtruediv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rmod__(self, *args, **kwargs):\n return self._call(self._id, '__rmod__', (args, kwargs))\n\n def __pow__(self, *args, **kwargs):\n return self._call(self._id, '__pow__', (args, kwargs))\n <function token>\n\n def __eq__(self, *args, **kwargs):\n return self._call(self._id, '__eq__', (args, kwargs))\n <function token>\n <function token>\n\n def __le__(self, *args, **kwargs):\n return self._call(self._id, '__le__', (args, kwargs))\n\n def __ge__(self, *args, **kwargs):\n return self._call(self._id, '__ge__', (args, kwargs))\n <function token>\n\n def __and__(self, *args, **kwargs):\n return self._call(self._id, '__and__', (args, kwargs))\n\n def __or__(self, *args, **kwargs):\n return self._call(self._id, '__or__', (args, kwargs))\n\n def __invert__(self, *args, **kwargs):\n return self._call(self._id, '__invert__', (args, kwargs))\n\n def __rand__(self, *args, **kwargs):\n return self._call(self._id, '__rand__', (args, kwargs))\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass Column(Proxy):\n <function token>\n <function token>\n\n def __repr__(self):\n return self._call(self._id, '__repr__', ((), {}))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __mod__(self, *args, **kwargs):\n return self._call(self._id, '__mod__', (args, kwargs))\n <function token>\n <function token>\n <function token>\n\n def __rdiv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rtruediv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rmod__(self, *args, **kwargs):\n return self._call(self._id, '__rmod__', (args, kwargs))\n\n def __pow__(self, *args, **kwargs):\n return self._call(self._id, '__pow__', (args, kwargs))\n <function token>\n\n def __eq__(self, *args, **kwargs):\n return self._call(self._id, '__eq__', (args, kwargs))\n <function token>\n <function token>\n\n def __le__(self, *args, **kwargs):\n return self._call(self._id, '__le__', (args, kwargs))\n\n def __ge__(self, *args, **kwargs):\n return self._call(self._id, '__ge__', (args, kwargs))\n <function token>\n\n def __and__(self, *args, **kwargs):\n return self._call(self._id, '__and__', (args, kwargs))\n\n def __or__(self, *args, **kwargs):\n return self._call(self._id, '__or__', (args, kwargs))\n <function token>\n\n def __rand__(self, *args, **kwargs):\n return self._call(self._id, '__rand__', (args, kwargs))\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass Column(Proxy):\n <function token>\n <function token>\n\n def __repr__(self):\n return self._call(self._id, '__repr__', ((), {}))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __mod__(self, *args, **kwargs):\n return self._call(self._id, '__mod__', (args, kwargs))\n <function token>\n <function token>\n <function token>\n\n def __rdiv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rtruediv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rmod__(self, *args, **kwargs):\n return self._call(self._id, '__rmod__', (args, kwargs))\n\n def __pow__(self, *args, **kwargs):\n return self._call(self._id, '__pow__', (args, kwargs))\n <function token>\n\n def __eq__(self, *args, **kwargs):\n return self._call(self._id, '__eq__', (args, kwargs))\n <function token>\n <function token>\n\n def __le__(self, *args, **kwargs):\n return self._call(self._id, '__le__', (args, kwargs))\n\n def __ge__(self, *args, **kwargs):\n return self._call(self._id, '__ge__', (args, kwargs))\n <function token>\n\n def __and__(self, *args, **kwargs):\n return self._call(self._id, '__and__', (args, kwargs))\n\n def __or__(self, *args, **kwargs):\n return self._call(self._id, '__or__', (args, kwargs))\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass Column(Proxy):\n <function token>\n <function token>\n\n def __repr__(self):\n return self._call(self._id, '__repr__', ((), {}))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __mod__(self, *args, **kwargs):\n return self._call(self._id, '__mod__', (args, kwargs))\n <function token>\n <function token>\n <function token>\n\n def __rdiv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rtruediv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rmod__(self, *args, **kwargs):\n return self._call(self._id, '__rmod__', (args, kwargs))\n\n def __pow__(self, *args, **kwargs):\n return self._call(self._id, '__pow__', (args, kwargs))\n <function token>\n\n def __eq__(self, *args, **kwargs):\n return self._call(self._id, '__eq__', (args, kwargs))\n <function token>\n <function token>\n\n def __le__(self, *args, **kwargs):\n return self._call(self._id, '__le__', (args, kwargs))\n <function token>\n <function token>\n\n def __and__(self, *args, **kwargs):\n return self._call(self._id, '__and__', (args, kwargs))\n\n def __or__(self, *args, **kwargs):\n return self._call(self._id, '__or__', (args, kwargs))\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass Column(Proxy):\n <function token>\n <function token>\n\n def __repr__(self):\n return self._call(self._id, '__repr__', ((), {}))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __rdiv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rtruediv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rmod__(self, *args, **kwargs):\n return self._call(self._id, '__rmod__', (args, kwargs))\n\n def __pow__(self, *args, **kwargs):\n return self._call(self._id, '__pow__', (args, kwargs))\n <function token>\n\n def __eq__(self, *args, **kwargs):\n return self._call(self._id, '__eq__', (args, kwargs))\n <function token>\n <function token>\n\n def __le__(self, *args, **kwargs):\n return self._call(self._id, '__le__', (args, kwargs))\n <function token>\n <function token>\n\n def __and__(self, *args, **kwargs):\n return self._call(self._id, '__and__', (args, kwargs))\n\n def __or__(self, *args, **kwargs):\n return self._call(self._id, '__or__', (args, kwargs))\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass Column(Proxy):\n <function token>\n <function token>\n\n def __repr__(self):\n return self._call(self._id, '__repr__', ((), {}))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __rdiv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rtruediv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rmod__(self, *args, **kwargs):\n return self._call(self._id, '__rmod__', (args, kwargs))\n\n def __pow__(self, *args, **kwargs):\n return self._call(self._id, '__pow__', (args, kwargs))\n <function token>\n\n def __eq__(self, *args, **kwargs):\n return self._call(self._id, '__eq__', (args, kwargs))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __and__(self, *args, **kwargs):\n return self._call(self._id, '__and__', (args, kwargs))\n\n def __or__(self, *args, **kwargs):\n return self._call(self._id, '__or__', (args, kwargs))\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass Column(Proxy):\n <function token>\n <function token>\n\n def __repr__(self):\n return self._call(self._id, '__repr__', ((), {}))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __rdiv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rtruediv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rmod__(self, *args, **kwargs):\n return self._call(self._id, '__rmod__', (args, kwargs))\n\n def __pow__(self, *args, **kwargs):\n return self._call(self._id, '__pow__', (args, kwargs))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __and__(self, *args, **kwargs):\n return self._call(self._id, '__and__', (args, kwargs))\n\n def __or__(self, *args, **kwargs):\n return self._call(self._id, '__or__', (args, kwargs))\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass Column(Proxy):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __rdiv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rtruediv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rmod__(self, *args, **kwargs):\n return self._call(self._id, '__rmod__', (args, kwargs))\n\n def __pow__(self, *args, **kwargs):\n return self._call(self._id, '__pow__', (args, kwargs))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __and__(self, *args, **kwargs):\n return self._call(self._id, '__and__', (args, kwargs))\n\n def __or__(self, *args, **kwargs):\n return self._call(self._id, '__or__', (args, kwargs))\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass Column(Proxy):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __rtruediv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rmod__(self, *args, **kwargs):\n return self._call(self._id, '__rmod__', (args, kwargs))\n\n def __pow__(self, *args, **kwargs):\n return self._call(self._id, '__pow__', (args, kwargs))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __and__(self, *args, **kwargs):\n return self._call(self._id, '__and__', (args, kwargs))\n\n def __or__(self, *args, **kwargs):\n return self._call(self._id, '__or__', (args, kwargs))\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass Column(Proxy):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __rtruediv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rmod__(self, *args, **kwargs):\n return self._call(self._id, '__rmod__', (args, kwargs))\n\n def __pow__(self, *args, **kwargs):\n return self._call(self._id, '__pow__', (args, kwargs))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __or__(self, *args, **kwargs):\n return self._call(self._id, '__or__', (args, kwargs))\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass Column(Proxy):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __rtruediv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rmod__(self, *args, **kwargs):\n return self._call(self._id, '__rmod__', (args, kwargs))\n\n def __pow__(self, *args, **kwargs):\n return self._call(self._id, '__pow__', (args, kwargs))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass Column(Proxy):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __rtruediv__(self, *args, **kwargs):\n return self._call(self._id, '__rdiv__', (args, kwargs))\n\n def __rmod__(self, *args, **kwargs):\n return self._call(self._id, '__rmod__', (args, kwargs))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass Column(Proxy):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __rmod__(self, *args, **kwargs):\n return self._call(self._id, '__rmod__', (args, kwargs))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass Column(Proxy):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n<class token>\n"
] | false |
98,636 |
3063038ee8287aee7be4f6567cbc6152f8d2e5cf
|
import unittest
import json
import sys
import os
from lib.Inventory import Inventory
inventory_file = 'json/inventory.json'
out_inventory_file = ''
expected_inventory_list = {
"iron_plate": 40,
"iron_gear": 5,
"copper_plate": 20,
"copper_cable": 10,
"lubricant": 100
}
class TestInventory(unittest.TestCase):
def setUp(self):
self.inventory = Inventory(inventory_file, out_inventory_file, False)
def test_load_inventory(self):
self.assertTrue(inventory_file.endswith(".json") or inventory_file.endswith(".JSON"))
self.assertTrue(os.path.isfile(inventory_file))
self.assertEqual(self.inventory.load_inventory(inventory_file), expected_inventory_list)
def test_add_item(self):
self.assertFalse(self.inventory.add_item("iron_abc", "abc"))
self.assertTrue(self.inventory.add_item("iron_gear", 1))
self.assertEqual(self.inventory.get_item_quantity("iron_gear"), 6)
def test_remove_item(self):
self.assertTrue(self.inventory.remove_item("iron_gear", 1))
self.assertFalse(self.inventory.remove_item("iron_gear", 10))
self.assertFalse(self.inventory.remove_item("iron_iron", 1))
self.assertEqual(self.inventory.get_item_quantity("iron_gear"), 4)
def test_set_item_quantity(self):
self.assertTrue(self.inventory.set_item_quantity("iron_gear", 1))
self.assertTrue(self.inventory.set_item_quantity("iron_gear", 10))
self.assertFalse(self.inventory.set_item_quantity("iron_abc", "abc"))
self.assertEqual(self.inventory.get_item_quantity("iron_gear"), 10)
def test_get_item_quantity(self):
self.assertEqual(self.inventory.get_item_quantity("iron_gear"), 5)
self.assertEqual(self.inventory.get_item_quantity("iron_abc"), 0)
def test_check_item(self):
self.assertTrue(self.inventory.check_item("iron_gear", 1))
self.assertFalse(self.inventory.check_item("iron_gear", 10))
self.assertFalse(self.inventory.check_item("iron_iron", 1))
|
[
"import unittest\nimport json\nimport sys\nimport os\nfrom lib.Inventory import Inventory\n\ninventory_file = 'json/inventory.json'\nout_inventory_file = ''\nexpected_inventory_list = {\n \"iron_plate\": 40,\n \"iron_gear\": 5,\n \"copper_plate\": 20,\n \"copper_cable\": 10,\n \"lubricant\": 100\n}\n\nclass TestInventory(unittest.TestCase):\n\n def setUp(self):\n self.inventory = Inventory(inventory_file, out_inventory_file, False)\n\n def test_load_inventory(self):\n self.assertTrue(inventory_file.endswith(\".json\") or inventory_file.endswith(\".JSON\"))\n self.assertTrue(os.path.isfile(inventory_file))\n self.assertEqual(self.inventory.load_inventory(inventory_file), expected_inventory_list)\n\n def test_add_item(self):\n self.assertFalse(self.inventory.add_item(\"iron_abc\", \"abc\"))\n self.assertTrue(self.inventory.add_item(\"iron_gear\", 1))\n self.assertEqual(self.inventory.get_item_quantity(\"iron_gear\"), 6)\n\n def test_remove_item(self):\n self.assertTrue(self.inventory.remove_item(\"iron_gear\", 1))\n self.assertFalse(self.inventory.remove_item(\"iron_gear\", 10))\n self.assertFalse(self.inventory.remove_item(\"iron_iron\", 1))\n self.assertEqual(self.inventory.get_item_quantity(\"iron_gear\"), 4)\n\n def test_set_item_quantity(self):\n self.assertTrue(self.inventory.set_item_quantity(\"iron_gear\", 1))\n self.assertTrue(self.inventory.set_item_quantity(\"iron_gear\", 10))\n self.assertFalse(self.inventory.set_item_quantity(\"iron_abc\", \"abc\"))\n self.assertEqual(self.inventory.get_item_quantity(\"iron_gear\"), 10)\n\n def test_get_item_quantity(self):\n self.assertEqual(self.inventory.get_item_quantity(\"iron_gear\"), 5)\n self.assertEqual(self.inventory.get_item_quantity(\"iron_abc\"), 0)\n\n def test_check_item(self):\n self.assertTrue(self.inventory.check_item(\"iron_gear\", 1))\n self.assertFalse(self.inventory.check_item(\"iron_gear\", 10))\n self.assertFalse(self.inventory.check_item(\"iron_iron\", 1))\n",
"import unittest\nimport json\nimport sys\nimport os\nfrom lib.Inventory import Inventory\ninventory_file = 'json/inventory.json'\nout_inventory_file = ''\nexpected_inventory_list = {'iron_plate': 40, 'iron_gear': 5, 'copper_plate':\n 20, 'copper_cable': 10, 'lubricant': 100}\n\n\nclass TestInventory(unittest.TestCase):\n\n def setUp(self):\n self.inventory = Inventory(inventory_file, out_inventory_file, False)\n\n def test_load_inventory(self):\n self.assertTrue(inventory_file.endswith('.json') or inventory_file.\n endswith('.JSON'))\n self.assertTrue(os.path.isfile(inventory_file))\n self.assertEqual(self.inventory.load_inventory(inventory_file),\n expected_inventory_list)\n\n def test_add_item(self):\n self.assertFalse(self.inventory.add_item('iron_abc', 'abc'))\n self.assertTrue(self.inventory.add_item('iron_gear', 1))\n self.assertEqual(self.inventory.get_item_quantity('iron_gear'), 6)\n\n def test_remove_item(self):\n self.assertTrue(self.inventory.remove_item('iron_gear', 1))\n self.assertFalse(self.inventory.remove_item('iron_gear', 10))\n self.assertFalse(self.inventory.remove_item('iron_iron', 1))\n self.assertEqual(self.inventory.get_item_quantity('iron_gear'), 4)\n\n def test_set_item_quantity(self):\n self.assertTrue(self.inventory.set_item_quantity('iron_gear', 1))\n self.assertTrue(self.inventory.set_item_quantity('iron_gear', 10))\n self.assertFalse(self.inventory.set_item_quantity('iron_abc', 'abc'))\n self.assertEqual(self.inventory.get_item_quantity('iron_gear'), 10)\n\n def test_get_item_quantity(self):\n self.assertEqual(self.inventory.get_item_quantity('iron_gear'), 5)\n self.assertEqual(self.inventory.get_item_quantity('iron_abc'), 0)\n\n def test_check_item(self):\n self.assertTrue(self.inventory.check_item('iron_gear', 1))\n self.assertFalse(self.inventory.check_item('iron_gear', 10))\n self.assertFalse(self.inventory.check_item('iron_iron', 1))\n",
"<import token>\ninventory_file = 'json/inventory.json'\nout_inventory_file = ''\nexpected_inventory_list = {'iron_plate': 40, 'iron_gear': 5, 'copper_plate':\n 20, 'copper_cable': 10, 'lubricant': 100}\n\n\nclass TestInventory(unittest.TestCase):\n\n def setUp(self):\n self.inventory = Inventory(inventory_file, out_inventory_file, False)\n\n def test_load_inventory(self):\n self.assertTrue(inventory_file.endswith('.json') or inventory_file.\n endswith('.JSON'))\n self.assertTrue(os.path.isfile(inventory_file))\n self.assertEqual(self.inventory.load_inventory(inventory_file),\n expected_inventory_list)\n\n def test_add_item(self):\n self.assertFalse(self.inventory.add_item('iron_abc', 'abc'))\n self.assertTrue(self.inventory.add_item('iron_gear', 1))\n self.assertEqual(self.inventory.get_item_quantity('iron_gear'), 6)\n\n def test_remove_item(self):\n self.assertTrue(self.inventory.remove_item('iron_gear', 1))\n self.assertFalse(self.inventory.remove_item('iron_gear', 10))\n self.assertFalse(self.inventory.remove_item('iron_iron', 1))\n self.assertEqual(self.inventory.get_item_quantity('iron_gear'), 4)\n\n def test_set_item_quantity(self):\n self.assertTrue(self.inventory.set_item_quantity('iron_gear', 1))\n self.assertTrue(self.inventory.set_item_quantity('iron_gear', 10))\n self.assertFalse(self.inventory.set_item_quantity('iron_abc', 'abc'))\n self.assertEqual(self.inventory.get_item_quantity('iron_gear'), 10)\n\n def test_get_item_quantity(self):\n self.assertEqual(self.inventory.get_item_quantity('iron_gear'), 5)\n self.assertEqual(self.inventory.get_item_quantity('iron_abc'), 0)\n\n def test_check_item(self):\n self.assertTrue(self.inventory.check_item('iron_gear', 1))\n self.assertFalse(self.inventory.check_item('iron_gear', 10))\n self.assertFalse(self.inventory.check_item('iron_iron', 1))\n",
"<import token>\n<assignment token>\n\n\nclass TestInventory(unittest.TestCase):\n\n def setUp(self):\n self.inventory = Inventory(inventory_file, out_inventory_file, False)\n\n def test_load_inventory(self):\n self.assertTrue(inventory_file.endswith('.json') or inventory_file.\n endswith('.JSON'))\n self.assertTrue(os.path.isfile(inventory_file))\n self.assertEqual(self.inventory.load_inventory(inventory_file),\n expected_inventory_list)\n\n def test_add_item(self):\n self.assertFalse(self.inventory.add_item('iron_abc', 'abc'))\n self.assertTrue(self.inventory.add_item('iron_gear', 1))\n self.assertEqual(self.inventory.get_item_quantity('iron_gear'), 6)\n\n def test_remove_item(self):\n self.assertTrue(self.inventory.remove_item('iron_gear', 1))\n self.assertFalse(self.inventory.remove_item('iron_gear', 10))\n self.assertFalse(self.inventory.remove_item('iron_iron', 1))\n self.assertEqual(self.inventory.get_item_quantity('iron_gear'), 4)\n\n def test_set_item_quantity(self):\n self.assertTrue(self.inventory.set_item_quantity('iron_gear', 1))\n self.assertTrue(self.inventory.set_item_quantity('iron_gear', 10))\n self.assertFalse(self.inventory.set_item_quantity('iron_abc', 'abc'))\n self.assertEqual(self.inventory.get_item_quantity('iron_gear'), 10)\n\n def test_get_item_quantity(self):\n self.assertEqual(self.inventory.get_item_quantity('iron_gear'), 5)\n self.assertEqual(self.inventory.get_item_quantity('iron_abc'), 0)\n\n def test_check_item(self):\n self.assertTrue(self.inventory.check_item('iron_gear', 1))\n self.assertFalse(self.inventory.check_item('iron_gear', 10))\n self.assertFalse(self.inventory.check_item('iron_iron', 1))\n",
"<import token>\n<assignment token>\n\n\nclass TestInventory(unittest.TestCase):\n\n def setUp(self):\n self.inventory = Inventory(inventory_file, out_inventory_file, False)\n\n def test_load_inventory(self):\n self.assertTrue(inventory_file.endswith('.json') or inventory_file.\n endswith('.JSON'))\n self.assertTrue(os.path.isfile(inventory_file))\n self.assertEqual(self.inventory.load_inventory(inventory_file),\n expected_inventory_list)\n\n def test_add_item(self):\n self.assertFalse(self.inventory.add_item('iron_abc', 'abc'))\n self.assertTrue(self.inventory.add_item('iron_gear', 1))\n self.assertEqual(self.inventory.get_item_quantity('iron_gear'), 6)\n\n def test_remove_item(self):\n self.assertTrue(self.inventory.remove_item('iron_gear', 1))\n self.assertFalse(self.inventory.remove_item('iron_gear', 10))\n self.assertFalse(self.inventory.remove_item('iron_iron', 1))\n self.assertEqual(self.inventory.get_item_quantity('iron_gear'), 4)\n\n def test_set_item_quantity(self):\n self.assertTrue(self.inventory.set_item_quantity('iron_gear', 1))\n self.assertTrue(self.inventory.set_item_quantity('iron_gear', 10))\n self.assertFalse(self.inventory.set_item_quantity('iron_abc', 'abc'))\n self.assertEqual(self.inventory.get_item_quantity('iron_gear'), 10)\n <function token>\n\n def test_check_item(self):\n self.assertTrue(self.inventory.check_item('iron_gear', 1))\n self.assertFalse(self.inventory.check_item('iron_gear', 10))\n self.assertFalse(self.inventory.check_item('iron_iron', 1))\n",
"<import token>\n<assignment token>\n\n\nclass TestInventory(unittest.TestCase):\n\n def setUp(self):\n self.inventory = Inventory(inventory_file, out_inventory_file, False)\n\n def test_load_inventory(self):\n self.assertTrue(inventory_file.endswith('.json') or inventory_file.\n endswith('.JSON'))\n self.assertTrue(os.path.isfile(inventory_file))\n self.assertEqual(self.inventory.load_inventory(inventory_file),\n expected_inventory_list)\n\n def test_add_item(self):\n self.assertFalse(self.inventory.add_item('iron_abc', 'abc'))\n self.assertTrue(self.inventory.add_item('iron_gear', 1))\n self.assertEqual(self.inventory.get_item_quantity('iron_gear'), 6)\n\n def test_remove_item(self):\n self.assertTrue(self.inventory.remove_item('iron_gear', 1))\n self.assertFalse(self.inventory.remove_item('iron_gear', 10))\n self.assertFalse(self.inventory.remove_item('iron_iron', 1))\n self.assertEqual(self.inventory.get_item_quantity('iron_gear'), 4)\n <function token>\n <function token>\n\n def test_check_item(self):\n self.assertTrue(self.inventory.check_item('iron_gear', 1))\n self.assertFalse(self.inventory.check_item('iron_gear', 10))\n self.assertFalse(self.inventory.check_item('iron_iron', 1))\n",
"<import token>\n<assignment token>\n\n\nclass TestInventory(unittest.TestCase):\n\n def setUp(self):\n self.inventory = Inventory(inventory_file, out_inventory_file, False)\n\n def test_load_inventory(self):\n self.assertTrue(inventory_file.endswith('.json') or inventory_file.\n endswith('.JSON'))\n self.assertTrue(os.path.isfile(inventory_file))\n self.assertEqual(self.inventory.load_inventory(inventory_file),\n expected_inventory_list)\n <function token>\n\n def test_remove_item(self):\n self.assertTrue(self.inventory.remove_item('iron_gear', 1))\n self.assertFalse(self.inventory.remove_item('iron_gear', 10))\n self.assertFalse(self.inventory.remove_item('iron_iron', 1))\n self.assertEqual(self.inventory.get_item_quantity('iron_gear'), 4)\n <function token>\n <function token>\n\n def test_check_item(self):\n self.assertTrue(self.inventory.check_item('iron_gear', 1))\n self.assertFalse(self.inventory.check_item('iron_gear', 10))\n self.assertFalse(self.inventory.check_item('iron_iron', 1))\n",
"<import token>\n<assignment token>\n\n\nclass TestInventory(unittest.TestCase):\n\n def setUp(self):\n self.inventory = Inventory(inventory_file, out_inventory_file, False)\n <function token>\n <function token>\n\n def test_remove_item(self):\n self.assertTrue(self.inventory.remove_item('iron_gear', 1))\n self.assertFalse(self.inventory.remove_item('iron_gear', 10))\n self.assertFalse(self.inventory.remove_item('iron_iron', 1))\n self.assertEqual(self.inventory.get_item_quantity('iron_gear'), 4)\n <function token>\n <function token>\n\n def test_check_item(self):\n self.assertTrue(self.inventory.check_item('iron_gear', 1))\n self.assertFalse(self.inventory.check_item('iron_gear', 10))\n self.assertFalse(self.inventory.check_item('iron_iron', 1))\n",
"<import token>\n<assignment token>\n\n\nclass TestInventory(unittest.TestCase):\n\n def setUp(self):\n self.inventory = Inventory(inventory_file, out_inventory_file, False)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_check_item(self):\n self.assertTrue(self.inventory.check_item('iron_gear', 1))\n self.assertFalse(self.inventory.check_item('iron_gear', 10))\n self.assertFalse(self.inventory.check_item('iron_iron', 1))\n",
"<import token>\n<assignment token>\n\n\nclass TestInventory(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_check_item(self):\n self.assertTrue(self.inventory.check_item('iron_gear', 1))\n self.assertFalse(self.inventory.check_item('iron_gear', 10))\n self.assertFalse(self.inventory.check_item('iron_iron', 1))\n",
"<import token>\n<assignment token>\n\n\nclass TestInventory(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n<class token>\n"
] | false |
98,637 |
ba826457d6cbe5c876fa5af57677a187cf1c48de
|
X, Y, Z = map(int, input().split())
s = X - (X//(Y+Z))*(Y+Z)
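# s is X % (Y + Z) written without the modulo operator; the print below is
# equivalent to (X - Z) // (Y + Z), assuming the three inputs are positive integers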
print(X//(Y+Z) if s >= Z else X//(Y+Z)-1)
|
[
"X, Y, Z = map(int, input().split())\ns = X - (X//(Y+Z))*(Y+Z)\nprint(X//(Y+Z) if s >= Z else X//(Y+Z)-1)",
"X, Y, Z = map(int, input().split())\ns = X - X // (Y + Z) * (Y + Z)\nprint(X // (Y + Z) if s >= Z else X // (Y + Z) - 1)\n",
"<assignment token>\nprint(X // (Y + Z) if s >= Z else X // (Y + Z) - 1)\n",
"<assignment token>\n<code token>\n"
] | false |
98,638 |
41e4d9b8629e5a2e671a1e74e5eadb1550f96da0
|
import collections
file = open("input.txt","r")
r = [int(x) for x in file.read().split('-')]
passwords, count = [], 0
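# first pass: input.txt is assumed to hold a single "low-high" range of 6-digit values;
# keep values whose digits never decrease (the 'adjacent' flag) and that contain at
# least one pair of equal neighbouring digits (the 'consecutive' flag)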
for i in range(r[0], r[1]+1):
i = str(i)
n, prev = 1, i[0]
consecutive, adjacent = False, True
for j in range(1,6):
if i[j] == prev:
consecutive = True
if prev > i[j]:
adjacent = False
break
prev = i[j]
if consecutive and adjacent:
passwords.append(i)
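# second pass: because the surviving digits are non-decreasing, a digit occurring
# exactly twice forms an isolated adjacent pair, which is what this stricter count requires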
for p in passwords:
if(2 in collections.Counter(p).values()):
count += 1
print(count)
|
[
"import collections\n\nfile = open(\"input.txt\",\"r\")\nr = [int(x) for x in file.read().split('-')]\npasswords, count = [], 0\n\nfor i in range(r[0], r[1]+1):\n i = str(i)\n n, prev = 1, i[0]\n consecutive, adjacent = False, True\n\n for j in range(1,6):\n if i[j] == prev:\n consecutive = True\n if prev > i[j]:\n adjacent = False\n break\n prev = i[j]\n if consecutive and adjacent:\n passwords.append(i)\n\nfor p in passwords:\n if(2 in collections.Counter(p).values()):\n count += 1\n\nprint(count)",
"import collections\nfile = open('input.txt', 'r')\nr = [int(x) for x in file.read().split('-')]\npasswords, count = [], 0\nfor i in range(r[0], r[1] + 1):\n i = str(i)\n n, prev = 1, i[0]\n consecutive, adjacent = False, True\n for j in range(1, 6):\n if i[j] == prev:\n consecutive = True\n if prev > i[j]:\n adjacent = False\n break\n prev = i[j]\n if consecutive and adjacent:\n passwords.append(i)\nfor p in passwords:\n if 2 in collections.Counter(p).values():\n count += 1\nprint(count)\n",
"<import token>\nfile = open('input.txt', 'r')\nr = [int(x) for x in file.read().split('-')]\npasswords, count = [], 0\nfor i in range(r[0], r[1] + 1):\n i = str(i)\n n, prev = 1, i[0]\n consecutive, adjacent = False, True\n for j in range(1, 6):\n if i[j] == prev:\n consecutive = True\n if prev > i[j]:\n adjacent = False\n break\n prev = i[j]\n if consecutive and adjacent:\n passwords.append(i)\nfor p in passwords:\n if 2 in collections.Counter(p).values():\n count += 1\nprint(count)\n",
"<import token>\n<assignment token>\nfor i in range(r[0], r[1] + 1):\n i = str(i)\n n, prev = 1, i[0]\n consecutive, adjacent = False, True\n for j in range(1, 6):\n if i[j] == prev:\n consecutive = True\n if prev > i[j]:\n adjacent = False\n break\n prev = i[j]\n if consecutive and adjacent:\n passwords.append(i)\nfor p in passwords:\n if 2 in collections.Counter(p).values():\n count += 1\nprint(count)\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
98,639 |
b35b4d23f4e6e750e5cda8a306cef8dba207d36b
|
# Generated by Django 2.2 on 2020-02-26 17:38
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main_app', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='trail',
name='stars',
),
migrations.RemoveField(
model_name='user',
name='description',
),
migrations.RemoveField(
model_name='user',
name='is_admin',
),
]
|
[
"# Generated by Django 2.2 on 2020-02-26 17:38\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main_app', '0001_initial'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='trail',\n name='stars',\n ),\n migrations.RemoveField(\n model_name='user',\n name='description',\n ),\n migrations.RemoveField(\n model_name='user',\n name='is_admin',\n ),\n ]\n",
"from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('main_app', '0001_initial')]\n operations = [migrations.RemoveField(model_name='trail', name='stars'),\n migrations.RemoveField(model_name='user', name='description'),\n migrations.RemoveField(model_name='user', name='is_admin')]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('main_app', '0001_initial')]\n operations = [migrations.RemoveField(model_name='trail', name='stars'),\n migrations.RemoveField(model_name='user', name='description'),\n migrations.RemoveField(model_name='user', name='is_admin')]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
98,640 |
e11af6b1704985f78f392a47f03de6b45ed9bb8c
|
from src import balancer
from src.logger import Logger
from src.db_instances import DBInstance
# enable debugging
Logger.debug = True
# create our balancers object
myBalancer = balancer.Balancer( )
# create our database instances
instance1 = DBInstance('127.0.0.1', 'root', '' )
instance2 = DBInstance('192.168.1.27', 'boni', '', 'store')
# Add our instances to the balancer
myBalancer.add_db_instance( instance1 )
myBalancer.add_db_instance( instance2 )
# Connect to all instances
# NB: ->>>>Fails if one of them cannot be connected to ->>> Not implemented for now
myBalancer.connect_all()
# Get any failed connections
print myBalancer.get_total_failed_connections()
# Go ahead and select one instance to use in the next connection
# NB: Returns instance object->
instance = myBalancer.choose_instance( )
print instance.to_string()
# close all connections
myBalancer.disconnect_all()
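# NOTE: the bare "print" statements above are Python 2 syntax; this file does not
# parse under Python 3, which likely explains the error flag on this row.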
|
[
"from src import balancer\nfrom src.logger import Logger\nfrom src.db_instances import DBInstance\n\n# enable debugging \nLogger.debug = True \n\n# create our balancers object \nmyBalancer = balancer.Balancer( )\n\n# create our database instances\ninstance1 = DBInstance('127.0.0.1', 'root', '' )\ninstance2 = DBInstance('192.168.1.27', 'boni', '', 'store')\n\n# Add our instances to the balancer \nmyBalancer.add_db_instance( instance1 )\nmyBalancer.add_db_instance( instance2 )\n\n# Connect to all instances \n# NB: ->>>>Fails if one of them cannot be connected to ->>> Not implemented for now \nmyBalancer.connect_all()\n\n# Get any failed connections \nprint myBalancer.get_total_failed_connections()\n\n# Go ahead and select one instance to use in the next connection\n# NB: Returns instance object->\ninstance = myBalancer.choose_instance( )\nprint instance.to_string()\n\n# close all connections \nmyBalancer.disconnect_all()"
] | true |
98,641 |
55186fd54a290d058e6a9fd96c8c378aeb110a1f
|
#!/usr/bin/env python
"""
@package mi.core.instrument.data_particle_generator Base data particle generator
@file mi/core/instrument/data_particle_generator.py
@author Steve Foley
@brief Contains logic to generate data particles to be exchanged between
the driver and agent. This involves a JSON interchange format
"""
import time
import ntplib
import base64
import json
from mi.core.common import BaseEnum
from mi.core.exceptions import SampleException, ReadOnlyException, NotImplementedException, InstrumentParameterException
from mi.core.log import get_logger
log = get_logger()
__author__ = 'Steve Foley'
__license__ = 'Apache 2.0'
class CommonDataParticleType(BaseEnum):
"""
This enum defines all the common particle types defined in the modules. Currently there is only one, but by
using an enum here we have the opportunity to define more common data particles.
"""
RAW = "raw"
class DataParticleKey(BaseEnum):
PKT_FORMAT_ID = "pkt_format_id"
PKT_VERSION = "pkt_version"
STREAM_NAME = "stream_name"
INTERNAL_TIMESTAMP = "internal_timestamp"
PORT_TIMESTAMP = "port_timestamp"
DRIVER_TIMESTAMP = "driver_timestamp"
PREFERRED_TIMESTAMP = "preferred_timestamp"
QUALITY_FLAG = "quality_flag"
VALUES = "values"
VALUE_ID = "value_id"
VALUE = "value"
BINARY = "binary"
NEW_SEQUENCE = "new_sequence"
class DataParticleValue(BaseEnum):
JSON_DATA = "JSON_Data"
ENG = "eng"
OK = "ok"
CHECKSUM_FAILED = "checksum_failed"
OUT_OF_RANGE = "out_of_range"
INVALID = "invalid"
QUESTIONABLE = "questionable"
class DataParticle(object):
"""
This class is responsible for storing and ultimately generating data
particles in the designated format from the associated inputs. It
fills in fields as necessary, and is a valid Data Particle
that can be sent up to the InstrumentAgent.
It is the intent that this class is subclassed as needed if an instrument must
modify fields in the outgoing packet. The hope is to have most of the superclass
code be called by the child class with just values overridden as needed.
"""
# data particle type is intended to be defined in each derived data particle class. This value should be unique
# for all data particles. Best practice is to access this variable using the accessor method:
# data_particle_type()
_data_particle_type = None
def __init__(self, raw_data,
port_timestamp=None,
internal_timestamp=None,
preferred_timestamp=None,
quality_flag=DataParticleValue.OK,
new_sequence=None):
""" Build a particle seeded with appropriate information
@param raw_data The raw data used in the particle
"""
if new_sequence is not None and not isinstance(new_sequence, bool):
raise TypeError("new_sequence is not a bool")
self.contents = {
DataParticleKey.PKT_FORMAT_ID: DataParticleValue.JSON_DATA,
DataParticleKey.PKT_VERSION: 1,
DataParticleKey.PORT_TIMESTAMP: port_timestamp,
DataParticleKey.INTERNAL_TIMESTAMP: internal_timestamp,
DataParticleKey.DRIVER_TIMESTAMP: ntplib.system_to_ntp_time(time.time()),
DataParticleKey.PREFERRED_TIMESTAMP: preferred_timestamp,
DataParticleKey.QUALITY_FLAG: quality_flag,
}
self._encoding_errors = []
if new_sequence is not None:
self.contents[DataParticleKey.NEW_SEQUENCE] = new_sequence
self.raw_data = raw_data
self._values = None
def __eq__(self, arg):
"""
Quick equality check for testing purposes. If they have the same raw
data, timestamp, they are the same enough for this particle
"""
allowed_diff = .000001
if self._data_particle_type != arg._data_particle_type:
log.debug('Data particle type does not match: %s %s', self._data_particle_type, arg._data_particle_type)
return False
if self.raw_data != arg.raw_data:
log.debug('Raw data does not match')
return False
t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]
t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]
if (t1 is None) or (t2 is None):
tdiff = allowed_diff
else:
tdiff = abs(t1 - t2)
if tdiff > allowed_diff:
log.debug('Timestamp %s does not match %s', t1, t2)
return False
generated1 = json.loads(self.generate())
generated2 = json.loads(arg.generate())
missing, differing = self._compare(generated1, generated2, ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP,
DataParticleKey.PREFERRED_TIMESTAMP])
if missing:
log.error('Key mismatch between particle dictionaries: %r', missing)
return False
if differing:
log.error('Value mismatch between particle dictionaries: %r', differing)
return True
@staticmethod
def _compare(d1, d2, ignore_keys=None):
ignore_keys = ignore_keys if ignore_keys else []
missing = set(d1).symmetric_difference(d2)
differing = {}
for k in d1:
if k in ignore_keys or k in missing:
continue
if d1[k] != d2[k]:
differing[k] = (d1[k], d2[k])
return missing, differing
def set_internal_timestamp(self, timestamp=None, unix_time=None):
"""
Set the internal timestamp
@param timestamp: NTP timestamp to set
        @param unix_time: Unix time as returned from time.time()
@raise InstrumentParameterException if timestamp or unix_time not supplied
"""
if timestamp is None and unix_time is None:
raise InstrumentParameterException("timestamp or unix_time required")
if unix_time is not None:
timestamp = ntplib.system_to_ntp_time(unix_time)
# Do we want this to happen here or in down stream processes?
# if(not self._check_timestamp(timestamp)):
# raise InstrumentParameterException("invalid timestamp")
self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)
def set_port_timestamp(self, timestamp=None, unix_time=None):
"""
Set the port timestamp
@param timestamp: NTP timestamp to set
@param unix_time: Unix time as returned from time.time()
@raise InstrumentParameterException if timestamp or unix_time not supplied
"""
if timestamp is None and unix_time is None:
raise InstrumentParameterException("timestamp or unix_time required")
if unix_time is not None:
timestamp = ntplib.system_to_ntp_time(unix_time)
# Do we want this to happen here or in down stream processes?
if not self._check_timestamp(timestamp):
raise InstrumentParameterException("invalid timestamp")
self.contents[DataParticleKey.PORT_TIMESTAMP] = float(timestamp)
def set_value(self, id, value):
"""
Set a content value, restricted as necessary
@param id The ID of the value to set, should be from DataParticleKey
@param value The value to set
@raises ReadOnlyException If the parameter cannot be set
"""
if (id == DataParticleKey.INTERNAL_TIMESTAMP) and (self._check_timestamp(value)):
self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value
else:
raise ReadOnlyException("Parameter %s not able to be set to %s after object creation!" %
(id, value))
def get_value(self, id):
""" Return a stored value from contents
@param id The ID (from DataParticleKey) for the parameter to return
@raises NotImplementedException If there is an invalid id
"""
if DataParticleKey.has(id):
return self.contents[id]
else:
raise NotImplementedException("Value %s not available in particle!", id)
def get_value_from_values(self, value_id):
""" Return a stored value from values list
@param value_id The ID of the parameter to return
"""
if not self._values:
return None
values = [i for i in self._values if i[DataParticleKey.VALUE_ID] == value_id]
if not values:
return None
return values[0][DataParticleKey.VALUE]
def data_particle_type(self):
"""
Return the data particle type (aka stream name)
@raise: NotImplementedException if _data_particle_type is not set
"""
if self._data_particle_type is None:
raise NotImplementedException("_data_particle_type not initialized")
return self._data_particle_type
def generate_dict(self):
"""
Generate a simple dictionary of sensor data and timestamps, without
going to JSON. This is useful for the times when JSON is not needed to
go across an interface. There are times when particles are used
internally to a component/process/module/etc.
@retval A python dictionary with the proper timestamps and data values
        @throws InstrumentDriverException if there is a problem with the inputs
"""
# verify preferred timestamp exists in the structure...
if not self._check_preferred_timestamps():
raise SampleException("Preferred timestamp not in particle!")
# build response structure
self._encoding_errors = []
if self._values is None:
self._values = self._build_parsed_values()
result = self._build_base_structure()
result[DataParticleKey.STREAM_NAME] = self.data_particle_type()
result[DataParticleKey.VALUES] = self._values
return result
def generate(self, sorted=False):
"""
Generates a JSON_parsed packet from a sample dictionary of sensor data and
associates a timestamp with it
        @param sorted Return a sorted json dict; useful for testing, but slow,
        so don't do it unless it is important
@return A JSON_raw string, properly structured with port agent time stamp
and driver timestamp
@throws InstrumentDriverException If there is a problem with the inputs
"""
json_result = json.dumps(self.generate_dict(), sort_keys=sorted)
return json_result
def _build_parsed_values(self):
"""
        Build values of a parsed structure. Just the values are built
        so that a child class can override this class, but call it with
super() to get the base structure before modification
@return the values tag for this data structure ready to JSONify
@raises SampleException when parsed values can not be properly returned
"""
raise SampleException("Parsed values block not overridden")
def _build_base_structure(self):
"""
Build the base/header information for an output structure.
Follow on methods can then modify it by adding or editing values.
@return A fresh copy of a core structure to be exported
"""
result = dict(self.contents)
# clean out optional fields that were missing
if not self.contents[DataParticleKey.PORT_TIMESTAMP]:
del result[DataParticleKey.PORT_TIMESTAMP]
if not self.contents[DataParticleKey.INTERNAL_TIMESTAMP]:
del result[DataParticleKey.INTERNAL_TIMESTAMP]
return result
def _check_timestamp(self, timestamp):
"""
Check to make sure the timestamp is reasonable
@param timestamp An NTP4 formatted timestamp (64bit)
@return True if timestamp is okay or None, False otherwise
"""
if timestamp is None:
return True
if not isinstance(timestamp, float):
return False
# is it sufficiently in the future to be unreasonable?
if timestamp > ntplib.system_to_ntp_time(time.time() + (86400 * 365)):
return False
else:
return True
def _check_preferred_timestamps(self):
"""
Check to make sure the preferred timestamp indicated in the
particle is actually listed, possibly adjusting to 2nd best
if not there.
@throws SampleException When there is a problem with the preferred
timestamp in the sample.
"""
if self.contents[DataParticleKey.PREFERRED_TIMESTAMP] is None:
raise SampleException("Missing preferred timestamp, %s, in particle" %
self.contents[DataParticleKey.PREFERRED_TIMESTAMP])
# This should be handled downstream. Don't want to not publish data because
# the port agent stopped putting out timestamps
# if self.contents[self.contents[DataParticleKey.PREFERRED_TIMESTAMP]] == None:
# raise SampleException("Preferred timestamp, %s, is not defined" %
# self.contents[DataParticleKey.PREFERRED_TIMESTAMP])
return True
def _encode_value(self, name, value, encoding_function, value_range=None):
"""
Encode a value using the encoding function, if it fails store the error in a queue
:param value_range tuple containing min/max numerical values or min/max lengths
"""
encoded_val = None
# noinspection PyBroadException
# - custom encoding_function exceptions are not known a priori
try:
encoded_val = encoding_function(value)
except ValueError as e:
log.error('Unable to convert %s to %s.', encoded_val, encoding_function)
self._encoding_errors.append({name: value})
except Exception as e:
log.error('Data particle error encoding. Name: %s Value: %s, Encoding: %s', name, value, encoding_function)
self._encoding_errors.append({name: value})
# optional range checking
if value_range:
try:
vmin, vmax = value_range
except ValueError as e: # this only occurs as a programming error and should cause the parser to exit
log.exception('_encode_value must have exactly two values for tuple argument value_range')
raise ValueError(e)
if encoding_function in [int, float]:
if vmin and encoded_val < vmin:
log.error('Particle value (%s) below minimum threshold (%s < %s)', name, value, vmin)
self._encoding_errors.append({name: value})
elif vmax and encoded_val > vmax:
log.error('Particle value (%s) exceeds maximum threshold (%s > %s)', name, value, vmax)
self._encoding_errors.append({name: value})
elif hasattr(encoded_val, '__len__'):
try:
if vmin and len(encoded_val) < vmin:
log.error('Particle value (%s) length below minimum threshold (%s < %s)',
name, value, vmin)
self._encoding_errors.append({name: value})
elif vmax and len(encoded_val) > vmax:
log.error('Particle value (%s) length exceeds maximum threshold (%s > %s)',
name, value, vmax)
self._encoding_errors.append({name: value})
# in the unlikely event that a range was specified and the encoding object created a bogus len()
# we'll just ignore the range check
except TypeError:
log.warning('_encode_value received an encoding function (%s) that claimed to implement len() but '
'does not. Unable to apply range test to %s', encoding_function, name)
return {DataParticleKey.VALUE_ID: name,
DataParticleKey.VALUE: encoded_val}
def get_encoding_errors(self):
"""
Return the encoding errors list
"""
return self._encoding_errors
class RawDataParticleKey(BaseEnum):
PAYLOAD = "raw"
LENGTH = "length"
TYPE = "type"
CHECKSUM = "checksum"
class RawDataParticle(DataParticle):
"""
    This class is a common data particle for generating data particles of raw
data.
It essentially is a translation of the port agent packet
"""
_data_particle_type = CommonDataParticleType.RAW
def _build_parsed_values(self):
"""
Build a particle out of a port agent packet.
@returns A list that is ready to be added to the "values" tag before
the structure is JSONified
"""
port_agent_packet = self.raw_data
if not isinstance(port_agent_packet, dict):
raise SampleException("raw data not a dictionary")
for param in ["raw", "length", "type", "checksum"]:
if param not in port_agent_packet:
raise SampleException("raw data not a complete port agent packet. missing %s" % param)
payload = None
length = None
type = None
checksum = None
# Attempt to convert values
try:
payload = base64.b64encode(port_agent_packet.get("raw"))
except TypeError:
pass
try:
length = int(port_agent_packet.get("length"))
except TypeError:
pass
try:
type = int(port_agent_packet.get("type"))
except TypeError:
pass
try:
checksum = int(port_agent_packet.get("checksum"))
except TypeError:
pass
result = [{
DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,
DataParticleKey.VALUE: payload,
DataParticleKey.BINARY: True},
{
DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,
DataParticleKey.VALUE: length},
{
DataParticleKey.VALUE_ID: RawDataParticleKey.TYPE,
DataParticleKey.VALUE: type},
{
DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,
DataParticleKey.VALUE: checksum},
]
return result
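
# Illustrative usage sketch; MyTempParticle and the 'T=21.5' raw-data format are
# hypothetical stand-ins rather than part of this module. A concrete particle
# subclasses DataParticle, sets _data_particle_type, and overrides
# _build_parsed_values() to return a list of encoded values.
class MyTempParticle(DataParticle):
    _data_particle_type = 'my_temp_sample'

    def _build_parsed_values(self):
        # raw_data is assumed to look like 'T=21.5'
        return [self._encode_value('temperature', self.raw_data.split('=')[1], float)]


if __name__ == '__main__':
    particle = MyTempParticle('T=21.5',
                              port_timestamp=3900000000.0,
                              preferred_timestamp=DataParticleKey.PORT_TIMESTAMP)
    print(particle.generate())  # JSON string whose stream_name is 'my_temp_sample'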
|
[
"#!/usr/bin/env python\n\n\"\"\"\n@package mi.core.instrument.data_particle_generator Base data particle generator\n@file mi/core/instrument/data_particle_generator.py\n@author Steve Foley\n@brief Contains logic to generate data particles to be exchanged between\nthe driver and agent. This involves a JSON interchange format\n\"\"\"\n\nimport time\nimport ntplib\nimport base64\nimport json\n\nfrom mi.core.common import BaseEnum\nfrom mi.core.exceptions import SampleException, ReadOnlyException, NotImplementedException, InstrumentParameterException\nfrom mi.core.log import get_logger\n\nlog = get_logger()\n\n__author__ = 'Steve Foley'\n__license__ = 'Apache 2.0'\n\n\nclass CommonDataParticleType(BaseEnum):\n \"\"\"\n This enum defines all the common particle types defined in the modules. Currently there is only one, but by\n using an enum here we have the opportunity to define more common data particles.\n \"\"\"\n RAW = \"raw\"\n\n\nclass DataParticleKey(BaseEnum):\n PKT_FORMAT_ID = \"pkt_format_id\"\n PKT_VERSION = \"pkt_version\"\n STREAM_NAME = \"stream_name\"\n INTERNAL_TIMESTAMP = \"internal_timestamp\"\n PORT_TIMESTAMP = \"port_timestamp\"\n DRIVER_TIMESTAMP = \"driver_timestamp\"\n PREFERRED_TIMESTAMP = \"preferred_timestamp\"\n QUALITY_FLAG = \"quality_flag\"\n VALUES = \"values\"\n VALUE_ID = \"value_id\"\n VALUE = \"value\"\n BINARY = \"binary\"\n NEW_SEQUENCE = \"new_sequence\"\n\n\nclass DataParticleValue(BaseEnum):\n JSON_DATA = \"JSON_Data\"\n ENG = \"eng\"\n OK = \"ok\"\n CHECKSUM_FAILED = \"checksum_failed\"\n OUT_OF_RANGE = \"out_of_range\"\n INVALID = \"invalid\"\n QUESTIONABLE = \"questionable\"\n\n\nclass DataParticle(object):\n \"\"\"\n This class is responsible for storing and ultimately generating data\n particles in the designated format from the associated inputs. It\n fills in fields as necessary, and is a valid Data Particle\n that can be sent up to the InstrumentAgent.\n\n It is the intent that this class is subclassed as needed if an instrument must\n modify fields in the outgoing packet. The hope is to have most of the superclass\n code be called by the child class with just values overridden as needed.\n \"\"\"\n\n # data particle type is intended to be defined in each derived data particle class. This value should be unique\n # for all data particles. Best practice is to access this variable using the accessor method:\n # data_particle_type()\n _data_particle_type = None\n\n def __init__(self, raw_data,\n port_timestamp=None,\n internal_timestamp=None,\n preferred_timestamp=None,\n quality_flag=DataParticleValue.OK,\n new_sequence=None):\n \"\"\" Build a particle seeded with appropriate information\n\n @param raw_data The raw data used in the particle\n \"\"\"\n if new_sequence is not None and not isinstance(new_sequence, bool):\n raise TypeError(\"new_sequence is not a bool\")\n\n self.contents = {\n DataParticleKey.PKT_FORMAT_ID: DataParticleValue.JSON_DATA,\n DataParticleKey.PKT_VERSION: 1,\n DataParticleKey.PORT_TIMESTAMP: port_timestamp,\n DataParticleKey.INTERNAL_TIMESTAMP: internal_timestamp,\n DataParticleKey.DRIVER_TIMESTAMP: ntplib.system_to_ntp_time(time.time()),\n DataParticleKey.PREFERRED_TIMESTAMP: preferred_timestamp,\n DataParticleKey.QUALITY_FLAG: quality_flag,\n }\n self._encoding_errors = []\n if new_sequence is not None:\n self.contents[DataParticleKey.NEW_SEQUENCE] = new_sequence\n\n self.raw_data = raw_data\n self._values = None\n\n def __eq__(self, arg):\n \"\"\"\n Quick equality check for testing purposes. 
If they have the same raw\n data, timestamp, they are the same enough for this particle\n \"\"\"\n allowed_diff = .000001\n if self._data_particle_type != arg._data_particle_type:\n log.debug('Data particle type does not match: %s %s', self._data_particle_type, arg._data_particle_type)\n return False\n\n if self.raw_data != arg.raw_data:\n log.debug('Raw data does not match')\n return False\n\n t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n\n if (t1 is None) or (t2 is None):\n tdiff = allowed_diff\n else:\n tdiff = abs(t1 - t2)\n\n if tdiff > allowed_diff:\n log.debug('Timestamp %s does not match %s', t1, t2)\n return False\n\n generated1 = json.loads(self.generate())\n generated2 = json.loads(arg.generate())\n missing, differing = self._compare(generated1, generated2, ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP,\n DataParticleKey.PREFERRED_TIMESTAMP])\n if missing:\n log.error('Key mismatch between particle dictionaries: %r', missing)\n return False\n\n if differing:\n log.error('Value mismatch between particle dictionaries: %r', differing)\n\n return True\n\n @staticmethod\n def _compare(d1, d2, ignore_keys=None):\n ignore_keys = ignore_keys if ignore_keys else []\n missing = set(d1).symmetric_difference(d2)\n differing = {}\n for k in d1:\n if k in ignore_keys or k in missing:\n continue\n if d1[k] != d2[k]:\n differing[k] = (d1[k], d2[k])\n\n return missing, differing\n\n def set_internal_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the internal timestamp\n @param timestamp: NTP timestamp to set\n @param unit_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\"timestamp or unix_time required\")\n\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n\n # Do we want this to happen here or in down stream processes?\n # if(not self._check_timestamp(timestamp)):\n # raise InstrumentParameterException(\"invalid timestamp\")\n\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)\n\n def set_port_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the port timestamp\n @param timestamp: NTP timestamp to set\n @param unix_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\"timestamp or unix_time required\")\n\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n\n # Do we want this to happen here or in down stream processes?\n if not self._check_timestamp(timestamp):\n raise InstrumentParameterException(\"invalid timestamp\")\n\n self.contents[DataParticleKey.PORT_TIMESTAMP] = float(timestamp)\n\n def set_value(self, id, value):\n \"\"\"\n Set a content value, restricted as necessary\n\n @param id The ID of the value to set, should be from DataParticleKey\n @param value The value to set\n @raises ReadOnlyException If the parameter cannot be set\n \"\"\"\n if (id == DataParticleKey.INTERNAL_TIMESTAMP) and (self._check_timestamp(value)):\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value\n else:\n raise ReadOnlyException(\"Parameter %s not able to be set to %s after object creation!\" %\n (id, value))\n\n def get_value(self, id):\n \"\"\" Return a stored value from contents\n\n @param id The ID 
(from DataParticleKey) for the parameter to return\n @raises NotImplementedException If there is an invalid id\n \"\"\"\n if DataParticleKey.has(id):\n return self.contents[id]\n else:\n raise NotImplementedException(\"Value %s not available in particle!\", id)\n\n def get_value_from_values(self, value_id):\n \"\"\" Return a stored value from values list\n\n @param value_id The ID of the parameter to return\n \"\"\"\n if not self._values:\n return None\n values = [i for i in self._values if i[DataParticleKey.VALUE_ID] == value_id]\n if not values:\n return None\n return values[0][DataParticleKey.VALUE]\n\n\n def data_particle_type(self):\n \"\"\"\n Return the data particle type (aka stream name)\n @raise: NotImplementedException if _data_particle_type is not set\n \"\"\"\n if self._data_particle_type is None:\n raise NotImplementedException(\"_data_particle_type not initialized\")\n\n return self._data_particle_type\n\n def generate_dict(self):\n \"\"\"\n Generate a simple dictionary of sensor data and timestamps, without\n going to JSON. This is useful for the times when JSON is not needed to\n go across an interface. There are times when particles are used\n internally to a component/process/module/etc.\n @retval A python dictionary with the proper timestamps and data values\n @throws InstrumentDriverException if there is a problem wtih the inputs\n \"\"\"\n # verify preferred timestamp exists in the structure...\n if not self._check_preferred_timestamps():\n raise SampleException(\"Preferred timestamp not in particle!\")\n\n # build response structure\n self._encoding_errors = []\n if self._values is None:\n self._values = self._build_parsed_values()\n result = self._build_base_structure()\n result[DataParticleKey.STREAM_NAME] = self.data_particle_type()\n result[DataParticleKey.VALUES] = self._values\n\n return result\n\n def generate(self, sorted=False):\n \"\"\"\n Generates a JSON_parsed packet from a sample dictionary of sensor data and\n associates a timestamp with it\n\n @param sorted Returned sorted json dict, useful for testing, but slow,\n so dont do it unless it is important\n @return A JSON_raw string, properly structured with port agent time stamp\n and driver timestamp\n @throws InstrumentDriverException If there is a problem with the inputs\n \"\"\"\n json_result = json.dumps(self.generate_dict(), sort_keys=sorted)\n return json_result\n\n def _build_parsed_values(self):\n \"\"\"\n Build values of a parsed structure. 
Just the values are built so\n so that a child class can override this class, but call it with\n super() to get the base structure before modification\n\n @return the values tag for this data structure ready to JSONify\n @raises SampleException when parsed values can not be properly returned\n \"\"\"\n raise SampleException(\"Parsed values block not overridden\")\n\n def _build_base_structure(self):\n \"\"\"\n Build the base/header information for an output structure.\n Follow on methods can then modify it by adding or editing values.\n\n @return A fresh copy of a core structure to be exported\n \"\"\"\n result = dict(self.contents)\n # clean out optional fields that were missing\n if not self.contents[DataParticleKey.PORT_TIMESTAMP]:\n del result[DataParticleKey.PORT_TIMESTAMP]\n if not self.contents[DataParticleKey.INTERNAL_TIMESTAMP]:\n del result[DataParticleKey.INTERNAL_TIMESTAMP]\n return result\n\n def _check_timestamp(self, timestamp):\n \"\"\"\n Check to make sure the timestamp is reasonable\n\n @param timestamp An NTP4 formatted timestamp (64bit)\n @return True if timestamp is okay or None, False otherwise\n \"\"\"\n if timestamp is None:\n return True\n if not isinstance(timestamp, float):\n return False\n\n # is it sufficiently in the future to be unreasonable?\n if timestamp > ntplib.system_to_ntp_time(time.time() + (86400 * 365)):\n return False\n else:\n return True\n\n def _check_preferred_timestamps(self):\n \"\"\"\n Check to make sure the preferred timestamp indicated in the\n particle is actually listed, possibly adjusting to 2nd best\n if not there.\n\n @throws SampleException When there is a problem with the preferred\n timestamp in the sample.\n \"\"\"\n if self.contents[DataParticleKey.PREFERRED_TIMESTAMP] is None:\n raise SampleException(\"Missing preferred timestamp, %s, in particle\" %\n self.contents[DataParticleKey.PREFERRED_TIMESTAMP])\n\n # This should be handled downstream. Don't want to not publish data because\n # the port agent stopped putting out timestamps\n # if self.contents[self.contents[DataParticleKey.PREFERRED_TIMESTAMP]] == None:\n # raise SampleException(\"Preferred timestamp, %s, is not defined\" %\n # self.contents[DataParticleKey.PREFERRED_TIMESTAMP])\n\n return True\n\n def _encode_value(self, name, value, encoding_function, value_range=None):\n \"\"\"\n Encode a value using the encoding function, if it fails store the error in a queue\n\n :param value_range tuple containing min/max numerical values or min/max lengths\n \"\"\"\n encoded_val = None\n\n # noinspection PyBroadException\n # - custom encoding_function exceptions are not known a priori\n try:\n encoded_val = encoding_function(value)\n\n except ValueError as e:\n log.error('Unable to convert %s to %s.', encoded_val, encoding_function)\n self._encoding_errors.append({name: value})\n except Exception as e:\n log.error('Data particle error encoding. 
Name: %s Value: %s, Encoding: %s', name, value, encoding_function)\n self._encoding_errors.append({name: value})\n\n # optional range checking\n if value_range:\n try:\n vmin, vmax = value_range\n\n except ValueError as e: # this only occurs as a programming error and should cause the parser to exit\n log.exception('_encode_value must have exactly two values for tuple argument value_range')\n raise ValueError(e)\n\n if encoding_function in [int, float]:\n if vmin and encoded_val < vmin:\n log.error('Particle value (%s) below minimum threshold (%s < %s)', name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and encoded_val > vmax:\n log.error('Particle value (%s) exceeds maximum threshold (%s > %s)', name, value, vmax)\n self._encoding_errors.append({name: value})\n elif hasattr(encoded_val, '__len__'):\n try:\n if vmin and len(encoded_val) < vmin:\n log.error('Particle value (%s) length below minimum threshold (%s < %s)',\n name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and len(encoded_val) > vmax:\n log.error('Particle value (%s) length exceeds maximum threshold (%s > %s)',\n name, value, vmax)\n self._encoding_errors.append({name: value})\n # in the unlikely event that a range was specified and the encoding object created a bogus len()\n # we'll just ignore the range check\n except TypeError:\n log.warning('_encode_value received an encoding function (%s) that claimed to implement len() but '\n 'does not. Unable to apply range test to %s', encoding_function, name)\n\n return {DataParticleKey.VALUE_ID: name,\n DataParticleKey.VALUE: encoded_val}\n\n def get_encoding_errors(self):\n \"\"\"\n Return the encoding errors list\n \"\"\"\n return self._encoding_errors\n\n\nclass RawDataParticleKey(BaseEnum):\n PAYLOAD = \"raw\"\n LENGTH = \"length\"\n TYPE = \"type\"\n CHECKSUM = \"checksum\"\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException(\"raw data not a dictionary\")\n\n for param in [\"raw\", \"length\", \"type\", \"checksum\"]:\n if param not in port_agent_packet:\n raise SampleException(\"raw data not a complete port agent packet. missing %s\" % param)\n\n payload = None\n length = None\n type = None\n checksum = None\n\n # Attempt to convert values\n try:\n payload = base64.b64encode(port_agent_packet.get(\"raw\"))\n except TypeError:\n pass\n\n try:\n length = int(port_agent_packet.get(\"length\"))\n except TypeError:\n pass\n\n try:\n type = int(port_agent_packet.get(\"type\"))\n except TypeError:\n pass\n\n try:\n checksum = int(port_agent_packet.get(\"checksum\"))\n except TypeError:\n pass\n\n result = [{\n DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload,\n DataParticleKey.BINARY: True},\n {\n DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length},\n {\n DataParticleKey.VALUE_ID: RawDataParticleKey.TYPE,\n DataParticleKey.VALUE: type},\n {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum},\n ]\n\n return result\n",
"<docstring token>\nimport time\nimport ntplib\nimport base64\nimport json\nfrom mi.core.common import BaseEnum\nfrom mi.core.exceptions import SampleException, ReadOnlyException, NotImplementedException, InstrumentParameterException\nfrom mi.core.log import get_logger\nlog = get_logger()\n__author__ = 'Steve Foley'\n__license__ = 'Apache 2.0'\n\n\nclass CommonDataParticleType(BaseEnum):\n \"\"\"\n This enum defines all the common particle types defined in the modules. Currently there is only one, but by\n using an enum here we have the opportunity to define more common data particles.\n \"\"\"\n RAW = 'raw'\n\n\nclass DataParticleKey(BaseEnum):\n PKT_FORMAT_ID = 'pkt_format_id'\n PKT_VERSION = 'pkt_version'\n STREAM_NAME = 'stream_name'\n INTERNAL_TIMESTAMP = 'internal_timestamp'\n PORT_TIMESTAMP = 'port_timestamp'\n DRIVER_TIMESTAMP = 'driver_timestamp'\n PREFERRED_TIMESTAMP = 'preferred_timestamp'\n QUALITY_FLAG = 'quality_flag'\n VALUES = 'values'\n VALUE_ID = 'value_id'\n VALUE = 'value'\n BINARY = 'binary'\n NEW_SEQUENCE = 'new_sequence'\n\n\nclass DataParticleValue(BaseEnum):\n JSON_DATA = 'JSON_Data'\n ENG = 'eng'\n OK = 'ok'\n CHECKSUM_FAILED = 'checksum_failed'\n OUT_OF_RANGE = 'out_of_range'\n INVALID = 'invalid'\n QUESTIONABLE = 'questionable'\n\n\nclass DataParticle(object):\n \"\"\"\n This class is responsible for storing and ultimately generating data\n particles in the designated format from the associated inputs. It\n fills in fields as necessary, and is a valid Data Particle\n that can be sent up to the InstrumentAgent.\n\n It is the intent that this class is subclassed as needed if an instrument must\n modify fields in the outgoing packet. The hope is to have most of the superclass\n code be called by the child class with just values overridden as needed.\n \"\"\"\n _data_particle_type = None\n\n def __init__(self, raw_data, port_timestamp=None, internal_timestamp=\n None, preferred_timestamp=None, quality_flag=DataParticleValue.OK,\n new_sequence=None):\n \"\"\" Build a particle seeded with appropriate information\n\n @param raw_data The raw data used in the particle\n \"\"\"\n if new_sequence is not None and not isinstance(new_sequence, bool):\n raise TypeError('new_sequence is not a bool')\n self.contents = {DataParticleKey.PKT_FORMAT_ID: DataParticleValue.\n JSON_DATA, DataParticleKey.PKT_VERSION: 1, DataParticleKey.\n PORT_TIMESTAMP: port_timestamp, DataParticleKey.\n INTERNAL_TIMESTAMP: internal_timestamp, DataParticleKey.\n DRIVER_TIMESTAMP: ntplib.system_to_ntp_time(time.time()),\n DataParticleKey.PREFERRED_TIMESTAMP: preferred_timestamp,\n DataParticleKey.QUALITY_FLAG: quality_flag}\n self._encoding_errors = []\n if new_sequence is not None:\n self.contents[DataParticleKey.NEW_SEQUENCE] = new_sequence\n self.raw_data = raw_data\n self._values = None\n\n def __eq__(self, arg):\n \"\"\"\n Quick equality check for testing purposes. 
If they have the same raw\n data, timestamp, they are the same enough for this particle\n \"\"\"\n allowed_diff = 1e-06\n if self._data_particle_type != arg._data_particle_type:\n log.debug('Data particle type does not match: %s %s', self.\n _data_particle_type, arg._data_particle_type)\n return False\n if self.raw_data != arg.raw_data:\n log.debug('Raw data does not match')\n return False\n t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n if t1 is None or t2 is None:\n tdiff = allowed_diff\n else:\n tdiff = abs(t1 - t2)\n if tdiff > allowed_diff:\n log.debug('Timestamp %s does not match %s', t1, t2)\n return False\n generated1 = json.loads(self.generate())\n generated2 = json.loads(arg.generate())\n missing, differing = self._compare(generated1, generated2,\n ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP, DataParticleKey.\n PREFERRED_TIMESTAMP])\n if missing:\n log.error('Key mismatch between particle dictionaries: %r', missing\n )\n return False\n if differing:\n log.error('Value mismatch between particle dictionaries: %r',\n differing)\n return True\n\n @staticmethod\n def _compare(d1, d2, ignore_keys=None):\n ignore_keys = ignore_keys if ignore_keys else []\n missing = set(d1).symmetric_difference(d2)\n differing = {}\n for k in d1:\n if k in ignore_keys or k in missing:\n continue\n if d1[k] != d2[k]:\n differing[k] = d1[k], d2[k]\n return missing, differing\n\n def set_internal_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the internal timestamp\n @param timestamp: NTP timestamp to set\n @param unit_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)\n\n def set_port_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the port timestamp\n @param timestamp: NTP timestamp to set\n @param unix_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n if not self._check_timestamp(timestamp):\n raise InstrumentParameterException('invalid timestamp')\n self.contents[DataParticleKey.PORT_TIMESTAMP] = float(timestamp)\n\n def set_value(self, id, value):\n \"\"\"\n Set a content value, restricted as necessary\n\n @param id The ID of the value to set, should be from DataParticleKey\n @param value The value to set\n @raises ReadOnlyException If the parameter cannot be set\n \"\"\"\n if id == DataParticleKey.INTERNAL_TIMESTAMP and self._check_timestamp(\n value):\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value\n else:\n raise ReadOnlyException(\n 'Parameter %s not able to be set to %s after object creation!'\n % (id, value))\n\n def get_value(self, id):\n \"\"\" Return a stored value from contents\n\n @param id The ID (from DataParticleKey) for the parameter to return\n @raises NotImplementedException If there is an invalid id\n \"\"\"\n if DataParticleKey.has(id):\n return self.contents[id]\n else:\n raise NotImplementedException('Value %s not available in particle!'\n , 
id)\n\n def get_value_from_values(self, value_id):\n \"\"\" Return a stored value from values list\n\n @param value_id The ID of the parameter to return\n \"\"\"\n if not self._values:\n return None\n values = [i for i in self._values if i[DataParticleKey.VALUE_ID] ==\n value_id]\n if not values:\n return None\n return values[0][DataParticleKey.VALUE]\n\n def data_particle_type(self):\n \"\"\"\n Return the data particle type (aka stream name)\n @raise: NotImplementedException if _data_particle_type is not set\n \"\"\"\n if self._data_particle_type is None:\n raise NotImplementedException('_data_particle_type not initialized'\n )\n return self._data_particle_type\n\n def generate_dict(self):\n \"\"\"\n Generate a simple dictionary of sensor data and timestamps, without\n going to JSON. This is useful for the times when JSON is not needed to\n go across an interface. There are times when particles are used\n internally to a component/process/module/etc.\n @retval A python dictionary with the proper timestamps and data values\n @throws InstrumentDriverException if there is a problem wtih the inputs\n \"\"\"\n if not self._check_preferred_timestamps():\n raise SampleException('Preferred timestamp not in particle!')\n self._encoding_errors = []\n if self._values is None:\n self._values = self._build_parsed_values()\n result = self._build_base_structure()\n result[DataParticleKey.STREAM_NAME] = self.data_particle_type()\n result[DataParticleKey.VALUES] = self._values\n return result\n\n def generate(self, sorted=False):\n \"\"\"\n Generates a JSON_parsed packet from a sample dictionary of sensor data and\n associates a timestamp with it\n\n @param sorted Returned sorted json dict, useful for testing, but slow,\n so dont do it unless it is important\n @return A JSON_raw string, properly structured with port agent time stamp\n and driver timestamp\n @throws InstrumentDriverException If there is a problem with the inputs\n \"\"\"\n json_result = json.dumps(self.generate_dict(), sort_keys=sorted)\n return json_result\n\n def _build_parsed_values(self):\n \"\"\"\n Build values of a parsed structure. 
Just the values are built so\n so that a child class can override this class, but call it with\n super() to get the base structure before modification\n\n @return the values tag for this data structure ready to JSONify\n @raises SampleException when parsed values can not be properly returned\n \"\"\"\n raise SampleException('Parsed values block not overridden')\n\n def _build_base_structure(self):\n \"\"\"\n Build the base/header information for an output structure.\n Follow on methods can then modify it by adding or editing values.\n\n @return A fresh copy of a core structure to be exported\n \"\"\"\n result = dict(self.contents)\n if not self.contents[DataParticleKey.PORT_TIMESTAMP]:\n del result[DataParticleKey.PORT_TIMESTAMP]\n if not self.contents[DataParticleKey.INTERNAL_TIMESTAMP]:\n del result[DataParticleKey.INTERNAL_TIMESTAMP]\n return result\n\n def _check_timestamp(self, timestamp):\n \"\"\"\n Check to make sure the timestamp is reasonable\n\n @param timestamp An NTP4 formatted timestamp (64bit)\n @return True if timestamp is okay or None, False otherwise\n \"\"\"\n if timestamp is None:\n return True\n if not isinstance(timestamp, float):\n return False\n if timestamp > ntplib.system_to_ntp_time(time.time() + 86400 * 365):\n return False\n else:\n return True\n\n def _check_preferred_timestamps(self):\n \"\"\"\n Check to make sure the preferred timestamp indicated in the\n particle is actually listed, possibly adjusting to 2nd best\n if not there.\n\n @throws SampleException When there is a problem with the preferred\n timestamp in the sample.\n \"\"\"\n if self.contents[DataParticleKey.PREFERRED_TIMESTAMP] is None:\n raise SampleException(\n 'Missing preferred timestamp, %s, in particle' % self.\n contents[DataParticleKey.PREFERRED_TIMESTAMP])\n return True\n\n def _encode_value(self, name, value, encoding_function, value_range=None):\n \"\"\"\n Encode a value using the encoding function, if it fails store the error in a queue\n\n :param value_range tuple containing min/max numerical values or min/max lengths\n \"\"\"\n encoded_val = None\n try:\n encoded_val = encoding_function(value)\n except ValueError as e:\n log.error('Unable to convert %s to %s.', encoded_val,\n encoding_function)\n self._encoding_errors.append({name: value})\n except Exception as e:\n log.error(\n 'Data particle error encoding. 
Name: %s Value: %s, Encoding: %s'\n , name, value, encoding_function)\n self._encoding_errors.append({name: value})\n if value_range:\n try:\n vmin, vmax = value_range\n except ValueError as e:\n log.exception(\n '_encode_value must have exactly two values for tuple argument value_range'\n )\n raise ValueError(e)\n if encoding_function in [int, float]:\n if vmin and encoded_val < vmin:\n log.error(\n 'Particle value (%s) below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and encoded_val > vmax:\n log.error(\n 'Particle value (%s) exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n elif hasattr(encoded_val, '__len__'):\n try:\n if vmin and len(encoded_val) < vmin:\n log.error(\n 'Particle value (%s) length below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and len(encoded_val) > vmax:\n log.error(\n 'Particle value (%s) length exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n except TypeError:\n log.warning(\n '_encode_value received an encoding function (%s) that claimed to implement len() but does not. Unable to apply range test to %s'\n , encoding_function, name)\n return {DataParticleKey.VALUE_ID: name, DataParticleKey.VALUE:\n encoded_val}\n\n def get_encoding_errors(self):\n \"\"\"\n Return the encoding errors list\n \"\"\"\n return self._encoding_errors\n\n\nclass RawDataParticleKey(BaseEnum):\n PAYLOAD = 'raw'\n LENGTH = 'length'\n TYPE = 'type'\n CHECKSUM = 'checksum'\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
"<docstring token>\n<import token>\nlog = get_logger()\n__author__ = 'Steve Foley'\n__license__ = 'Apache 2.0'\n\n\nclass CommonDataParticleType(BaseEnum):\n \"\"\"\n This enum defines all the common particle types defined in the modules. Currently there is only one, but by\n using an enum here we have the opportunity to define more common data particles.\n \"\"\"\n RAW = 'raw'\n\n\nclass DataParticleKey(BaseEnum):\n PKT_FORMAT_ID = 'pkt_format_id'\n PKT_VERSION = 'pkt_version'\n STREAM_NAME = 'stream_name'\n INTERNAL_TIMESTAMP = 'internal_timestamp'\n PORT_TIMESTAMP = 'port_timestamp'\n DRIVER_TIMESTAMP = 'driver_timestamp'\n PREFERRED_TIMESTAMP = 'preferred_timestamp'\n QUALITY_FLAG = 'quality_flag'\n VALUES = 'values'\n VALUE_ID = 'value_id'\n VALUE = 'value'\n BINARY = 'binary'\n NEW_SEQUENCE = 'new_sequence'\n\n\nclass DataParticleValue(BaseEnum):\n JSON_DATA = 'JSON_Data'\n ENG = 'eng'\n OK = 'ok'\n CHECKSUM_FAILED = 'checksum_failed'\n OUT_OF_RANGE = 'out_of_range'\n INVALID = 'invalid'\n QUESTIONABLE = 'questionable'\n\n\nclass DataParticle(object):\n \"\"\"\n This class is responsible for storing and ultimately generating data\n particles in the designated format from the associated inputs. It\n fills in fields as necessary, and is a valid Data Particle\n that can be sent up to the InstrumentAgent.\n\n It is the intent that this class is subclassed as needed if an instrument must\n modify fields in the outgoing packet. The hope is to have most of the superclass\n code be called by the child class with just values overridden as needed.\n \"\"\"\n _data_particle_type = None\n\n def __init__(self, raw_data, port_timestamp=None, internal_timestamp=\n None, preferred_timestamp=None, quality_flag=DataParticleValue.OK,\n new_sequence=None):\n \"\"\" Build a particle seeded with appropriate information\n\n @param raw_data The raw data used in the particle\n \"\"\"\n if new_sequence is not None and not isinstance(new_sequence, bool):\n raise TypeError('new_sequence is not a bool')\n self.contents = {DataParticleKey.PKT_FORMAT_ID: DataParticleValue.\n JSON_DATA, DataParticleKey.PKT_VERSION: 1, DataParticleKey.\n PORT_TIMESTAMP: port_timestamp, DataParticleKey.\n INTERNAL_TIMESTAMP: internal_timestamp, DataParticleKey.\n DRIVER_TIMESTAMP: ntplib.system_to_ntp_time(time.time()),\n DataParticleKey.PREFERRED_TIMESTAMP: preferred_timestamp,\n DataParticleKey.QUALITY_FLAG: quality_flag}\n self._encoding_errors = []\n if new_sequence is not None:\n self.contents[DataParticleKey.NEW_SEQUENCE] = new_sequence\n self.raw_data = raw_data\n self._values = None\n\n def __eq__(self, arg):\n \"\"\"\n Quick equality check for testing purposes. 
If they have the same raw\n data, timestamp, they are the same enough for this particle\n \"\"\"\n allowed_diff = 1e-06\n if self._data_particle_type != arg._data_particle_type:\n log.debug('Data particle type does not match: %s %s', self.\n _data_particle_type, arg._data_particle_type)\n return False\n if self.raw_data != arg.raw_data:\n log.debug('Raw data does not match')\n return False\n t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n if t1 is None or t2 is None:\n tdiff = allowed_diff\n else:\n tdiff = abs(t1 - t2)\n if tdiff > allowed_diff:\n log.debug('Timestamp %s does not match %s', t1, t2)\n return False\n generated1 = json.loads(self.generate())\n generated2 = json.loads(arg.generate())\n missing, differing = self._compare(generated1, generated2,\n ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP, DataParticleKey.\n PREFERRED_TIMESTAMP])\n if missing:\n log.error('Key mismatch between particle dictionaries: %r', missing\n )\n return False\n if differing:\n log.error('Value mismatch between particle dictionaries: %r',\n differing)\n return True\n\n @staticmethod\n def _compare(d1, d2, ignore_keys=None):\n ignore_keys = ignore_keys if ignore_keys else []\n missing = set(d1).symmetric_difference(d2)\n differing = {}\n for k in d1:\n if k in ignore_keys or k in missing:\n continue\n if d1[k] != d2[k]:\n differing[k] = d1[k], d2[k]\n return missing, differing\n\n def set_internal_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the internal timestamp\n @param timestamp: NTP timestamp to set\n @param unit_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)\n\n def set_port_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the port timestamp\n @param timestamp: NTP timestamp to set\n @param unix_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n if not self._check_timestamp(timestamp):\n raise InstrumentParameterException('invalid timestamp')\n self.contents[DataParticleKey.PORT_TIMESTAMP] = float(timestamp)\n\n def set_value(self, id, value):\n \"\"\"\n Set a content value, restricted as necessary\n\n @param id The ID of the value to set, should be from DataParticleKey\n @param value The value to set\n @raises ReadOnlyException If the parameter cannot be set\n \"\"\"\n if id == DataParticleKey.INTERNAL_TIMESTAMP and self._check_timestamp(\n value):\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value\n else:\n raise ReadOnlyException(\n 'Parameter %s not able to be set to %s after object creation!'\n % (id, value))\n\n def get_value(self, id):\n \"\"\" Return a stored value from contents\n\n @param id The ID (from DataParticleKey) for the parameter to return\n @raises NotImplementedException If there is an invalid id\n \"\"\"\n if DataParticleKey.has(id):\n return self.contents[id]\n else:\n raise NotImplementedException('Value %s not available in particle!'\n , 
id)\n\n def get_value_from_values(self, value_id):\n \"\"\" Return a stored value from values list\n\n @param value_id The ID of the parameter to return\n \"\"\"\n if not self._values:\n return None\n values = [i for i in self._values if i[DataParticleKey.VALUE_ID] ==\n value_id]\n if not values:\n return None\n return values[0][DataParticleKey.VALUE]\n\n def data_particle_type(self):\n \"\"\"\n Return the data particle type (aka stream name)\n @raise: NotImplementedException if _data_particle_type is not set\n \"\"\"\n if self._data_particle_type is None:\n raise NotImplementedException('_data_particle_type not initialized'\n )\n return self._data_particle_type\n\n def generate_dict(self):\n \"\"\"\n Generate a simple dictionary of sensor data and timestamps, without\n going to JSON. This is useful for the times when JSON is not needed to\n go across an interface. There are times when particles are used\n internally to a component/process/module/etc.\n @retval A python dictionary with the proper timestamps and data values\n @throws InstrumentDriverException if there is a problem wtih the inputs\n \"\"\"\n if not self._check_preferred_timestamps():\n raise SampleException('Preferred timestamp not in particle!')\n self._encoding_errors = []\n if self._values is None:\n self._values = self._build_parsed_values()\n result = self._build_base_structure()\n result[DataParticleKey.STREAM_NAME] = self.data_particle_type()\n result[DataParticleKey.VALUES] = self._values\n return result\n\n def generate(self, sorted=False):\n \"\"\"\n Generates a JSON_parsed packet from a sample dictionary of sensor data and\n associates a timestamp with it\n\n @param sorted Returned sorted json dict, useful for testing, but slow,\n so dont do it unless it is important\n @return A JSON_raw string, properly structured with port agent time stamp\n and driver timestamp\n @throws InstrumentDriverException If there is a problem with the inputs\n \"\"\"\n json_result = json.dumps(self.generate_dict(), sort_keys=sorted)\n return json_result\n\n def _build_parsed_values(self):\n \"\"\"\n Build values of a parsed structure. 
Just the values are built so\n so that a child class can override this class, but call it with\n super() to get the base structure before modification\n\n @return the values tag for this data structure ready to JSONify\n @raises SampleException when parsed values can not be properly returned\n \"\"\"\n raise SampleException('Parsed values block not overridden')\n\n def _build_base_structure(self):\n \"\"\"\n Build the base/header information for an output structure.\n Follow on methods can then modify it by adding or editing values.\n\n @return A fresh copy of a core structure to be exported\n \"\"\"\n result = dict(self.contents)\n if not self.contents[DataParticleKey.PORT_TIMESTAMP]:\n del result[DataParticleKey.PORT_TIMESTAMP]\n if not self.contents[DataParticleKey.INTERNAL_TIMESTAMP]:\n del result[DataParticleKey.INTERNAL_TIMESTAMP]\n return result\n\n def _check_timestamp(self, timestamp):\n \"\"\"\n Check to make sure the timestamp is reasonable\n\n @param timestamp An NTP4 formatted timestamp (64bit)\n @return True if timestamp is okay or None, False otherwise\n \"\"\"\n if timestamp is None:\n return True\n if not isinstance(timestamp, float):\n return False\n if timestamp > ntplib.system_to_ntp_time(time.time() + 86400 * 365):\n return False\n else:\n return True\n\n def _check_preferred_timestamps(self):\n \"\"\"\n Check to make sure the preferred timestamp indicated in the\n particle is actually listed, possibly adjusting to 2nd best\n if not there.\n\n @throws SampleException When there is a problem with the preferred\n timestamp in the sample.\n \"\"\"\n if self.contents[DataParticleKey.PREFERRED_TIMESTAMP] is None:\n raise SampleException(\n 'Missing preferred timestamp, %s, in particle' % self.\n contents[DataParticleKey.PREFERRED_TIMESTAMP])\n return True\n\n def _encode_value(self, name, value, encoding_function, value_range=None):\n \"\"\"\n Encode a value using the encoding function, if it fails store the error in a queue\n\n :param value_range tuple containing min/max numerical values or min/max lengths\n \"\"\"\n encoded_val = None\n try:\n encoded_val = encoding_function(value)\n except ValueError as e:\n log.error('Unable to convert %s to %s.', encoded_val,\n encoding_function)\n self._encoding_errors.append({name: value})\n except Exception as e:\n log.error(\n 'Data particle error encoding. 
Name: %s Value: %s, Encoding: %s'\n , name, value, encoding_function)\n self._encoding_errors.append({name: value})\n if value_range:\n try:\n vmin, vmax = value_range\n except ValueError as e:\n log.exception(\n '_encode_value must have exactly two values for tuple argument value_range'\n )\n raise ValueError(e)\n if encoding_function in [int, float]:\n if vmin and encoded_val < vmin:\n log.error(\n 'Particle value (%s) below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and encoded_val > vmax:\n log.error(\n 'Particle value (%s) exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n elif hasattr(encoded_val, '__len__'):\n try:\n if vmin and len(encoded_val) < vmin:\n log.error(\n 'Particle value (%s) length below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and len(encoded_val) > vmax:\n log.error(\n 'Particle value (%s) length exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n except TypeError:\n log.warning(\n '_encode_value received an encoding function (%s) that claimed to implement len() but does not. Unable to apply range test to %s'\n , encoding_function, name)\n return {DataParticleKey.VALUE_ID: name, DataParticleKey.VALUE:\n encoded_val}\n\n def get_encoding_errors(self):\n \"\"\"\n Return the encoding errors list\n \"\"\"\n return self._encoding_errors\n\n\nclass RawDataParticleKey(BaseEnum):\n PAYLOAD = 'raw'\n LENGTH = 'length'\n TYPE = 'type'\n CHECKSUM = 'checksum'\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\nclass CommonDataParticleType(BaseEnum):\n \"\"\"\n This enum defines all the common particle types defined in the modules. Currently there is only one, but by\n using an enum here we have the opportunity to define more common data particles.\n \"\"\"\n RAW = 'raw'\n\n\nclass DataParticleKey(BaseEnum):\n PKT_FORMAT_ID = 'pkt_format_id'\n PKT_VERSION = 'pkt_version'\n STREAM_NAME = 'stream_name'\n INTERNAL_TIMESTAMP = 'internal_timestamp'\n PORT_TIMESTAMP = 'port_timestamp'\n DRIVER_TIMESTAMP = 'driver_timestamp'\n PREFERRED_TIMESTAMP = 'preferred_timestamp'\n QUALITY_FLAG = 'quality_flag'\n VALUES = 'values'\n VALUE_ID = 'value_id'\n VALUE = 'value'\n BINARY = 'binary'\n NEW_SEQUENCE = 'new_sequence'\n\n\nclass DataParticleValue(BaseEnum):\n JSON_DATA = 'JSON_Data'\n ENG = 'eng'\n OK = 'ok'\n CHECKSUM_FAILED = 'checksum_failed'\n OUT_OF_RANGE = 'out_of_range'\n INVALID = 'invalid'\n QUESTIONABLE = 'questionable'\n\n\nclass DataParticle(object):\n \"\"\"\n This class is responsible for storing and ultimately generating data\n particles in the designated format from the associated inputs. It\n fills in fields as necessary, and is a valid Data Particle\n that can be sent up to the InstrumentAgent.\n\n It is the intent that this class is subclassed as needed if an instrument must\n modify fields in the outgoing packet. The hope is to have most of the superclass\n code be called by the child class with just values overridden as needed.\n \"\"\"\n _data_particle_type = None\n\n def __init__(self, raw_data, port_timestamp=None, internal_timestamp=\n None, preferred_timestamp=None, quality_flag=DataParticleValue.OK,\n new_sequence=None):\n \"\"\" Build a particle seeded with appropriate information\n\n @param raw_data The raw data used in the particle\n \"\"\"\n if new_sequence is not None and not isinstance(new_sequence, bool):\n raise TypeError('new_sequence is not a bool')\n self.contents = {DataParticleKey.PKT_FORMAT_ID: DataParticleValue.\n JSON_DATA, DataParticleKey.PKT_VERSION: 1, DataParticleKey.\n PORT_TIMESTAMP: port_timestamp, DataParticleKey.\n INTERNAL_TIMESTAMP: internal_timestamp, DataParticleKey.\n DRIVER_TIMESTAMP: ntplib.system_to_ntp_time(time.time()),\n DataParticleKey.PREFERRED_TIMESTAMP: preferred_timestamp,\n DataParticleKey.QUALITY_FLAG: quality_flag}\n self._encoding_errors = []\n if new_sequence is not None:\n self.contents[DataParticleKey.NEW_SEQUENCE] = new_sequence\n self.raw_data = raw_data\n self._values = None\n\n def __eq__(self, arg):\n \"\"\"\n Quick equality check for testing purposes. 
If they have the same raw\n data, timestamp, they are the same enough for this particle\n \"\"\"\n allowed_diff = 1e-06\n if self._data_particle_type != arg._data_particle_type:\n log.debug('Data particle type does not match: %s %s', self.\n _data_particle_type, arg._data_particle_type)\n return False\n if self.raw_data != arg.raw_data:\n log.debug('Raw data does not match')\n return False\n t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n if t1 is None or t2 is None:\n tdiff = allowed_diff\n else:\n tdiff = abs(t1 - t2)\n if tdiff > allowed_diff:\n log.debug('Timestamp %s does not match %s', t1, t2)\n return False\n generated1 = json.loads(self.generate())\n generated2 = json.loads(arg.generate())\n missing, differing = self._compare(generated1, generated2,\n ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP, DataParticleKey.\n PREFERRED_TIMESTAMP])\n if missing:\n log.error('Key mismatch between particle dictionaries: %r', missing\n )\n return False\n if differing:\n log.error('Value mismatch between particle dictionaries: %r',\n differing)\n return True\n\n @staticmethod\n def _compare(d1, d2, ignore_keys=None):\n ignore_keys = ignore_keys if ignore_keys else []\n missing = set(d1).symmetric_difference(d2)\n differing = {}\n for k in d1:\n if k in ignore_keys or k in missing:\n continue\n if d1[k] != d2[k]:\n differing[k] = d1[k], d2[k]\n return missing, differing\n\n def set_internal_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the internal timestamp\n @param timestamp: NTP timestamp to set\n @param unit_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)\n\n def set_port_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the port timestamp\n @param timestamp: NTP timestamp to set\n @param unix_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n if not self._check_timestamp(timestamp):\n raise InstrumentParameterException('invalid timestamp')\n self.contents[DataParticleKey.PORT_TIMESTAMP] = float(timestamp)\n\n def set_value(self, id, value):\n \"\"\"\n Set a content value, restricted as necessary\n\n @param id The ID of the value to set, should be from DataParticleKey\n @param value The value to set\n @raises ReadOnlyException If the parameter cannot be set\n \"\"\"\n if id == DataParticleKey.INTERNAL_TIMESTAMP and self._check_timestamp(\n value):\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value\n else:\n raise ReadOnlyException(\n 'Parameter %s not able to be set to %s after object creation!'\n % (id, value))\n\n def get_value(self, id):\n \"\"\" Return a stored value from contents\n\n @param id The ID (from DataParticleKey) for the parameter to return\n @raises NotImplementedException If there is an invalid id\n \"\"\"\n if DataParticleKey.has(id):\n return self.contents[id]\n else:\n raise NotImplementedException('Value %s not available in particle!'\n , 
id)\n\n def get_value_from_values(self, value_id):\n \"\"\" Return a stored value from values list\n\n @param value_id The ID of the parameter to return\n \"\"\"\n if not self._values:\n return None\n values = [i for i in self._values if i[DataParticleKey.VALUE_ID] ==\n value_id]\n if not values:\n return None\n return values[0][DataParticleKey.VALUE]\n\n def data_particle_type(self):\n \"\"\"\n Return the data particle type (aka stream name)\n @raise: NotImplementedException if _data_particle_type is not set\n \"\"\"\n if self._data_particle_type is None:\n raise NotImplementedException('_data_particle_type not initialized'\n )\n return self._data_particle_type\n\n def generate_dict(self):\n \"\"\"\n Generate a simple dictionary of sensor data and timestamps, without\n going to JSON. This is useful for the times when JSON is not needed to\n go across an interface. There are times when particles are used\n internally to a component/process/module/etc.\n @retval A python dictionary with the proper timestamps and data values\n @throws InstrumentDriverException if there is a problem wtih the inputs\n \"\"\"\n if not self._check_preferred_timestamps():\n raise SampleException('Preferred timestamp not in particle!')\n self._encoding_errors = []\n if self._values is None:\n self._values = self._build_parsed_values()\n result = self._build_base_structure()\n result[DataParticleKey.STREAM_NAME] = self.data_particle_type()\n result[DataParticleKey.VALUES] = self._values\n return result\n\n def generate(self, sorted=False):\n \"\"\"\n Generates a JSON_parsed packet from a sample dictionary of sensor data and\n associates a timestamp with it\n\n @param sorted Returned sorted json dict, useful for testing, but slow,\n so dont do it unless it is important\n @return A JSON_raw string, properly structured with port agent time stamp\n and driver timestamp\n @throws InstrumentDriverException If there is a problem with the inputs\n \"\"\"\n json_result = json.dumps(self.generate_dict(), sort_keys=sorted)\n return json_result\n\n def _build_parsed_values(self):\n \"\"\"\n Build values of a parsed structure. 
Just the values are built so\n so that a child class can override this class, but call it with\n super() to get the base structure before modification\n\n @return the values tag for this data structure ready to JSONify\n @raises SampleException when parsed values can not be properly returned\n \"\"\"\n raise SampleException('Parsed values block not overridden')\n\n def _build_base_structure(self):\n \"\"\"\n Build the base/header information for an output structure.\n Follow on methods can then modify it by adding or editing values.\n\n @return A fresh copy of a core structure to be exported\n \"\"\"\n result = dict(self.contents)\n if not self.contents[DataParticleKey.PORT_TIMESTAMP]:\n del result[DataParticleKey.PORT_TIMESTAMP]\n if not self.contents[DataParticleKey.INTERNAL_TIMESTAMP]:\n del result[DataParticleKey.INTERNAL_TIMESTAMP]\n return result\n\n def _check_timestamp(self, timestamp):\n \"\"\"\n Check to make sure the timestamp is reasonable\n\n @param timestamp An NTP4 formatted timestamp (64bit)\n @return True if timestamp is okay or None, False otherwise\n \"\"\"\n if timestamp is None:\n return True\n if not isinstance(timestamp, float):\n return False\n if timestamp > ntplib.system_to_ntp_time(time.time() + 86400 * 365):\n return False\n else:\n return True\n\n def _check_preferred_timestamps(self):\n \"\"\"\n Check to make sure the preferred timestamp indicated in the\n particle is actually listed, possibly adjusting to 2nd best\n if not there.\n\n @throws SampleException When there is a problem with the preferred\n timestamp in the sample.\n \"\"\"\n if self.contents[DataParticleKey.PREFERRED_TIMESTAMP] is None:\n raise SampleException(\n 'Missing preferred timestamp, %s, in particle' % self.\n contents[DataParticleKey.PREFERRED_TIMESTAMP])\n return True\n\n def _encode_value(self, name, value, encoding_function, value_range=None):\n \"\"\"\n Encode a value using the encoding function, if it fails store the error in a queue\n\n :param value_range tuple containing min/max numerical values or min/max lengths\n \"\"\"\n encoded_val = None\n try:\n encoded_val = encoding_function(value)\n except ValueError as e:\n log.error('Unable to convert %s to %s.', encoded_val,\n encoding_function)\n self._encoding_errors.append({name: value})\n except Exception as e:\n log.error(\n 'Data particle error encoding. 
Name: %s Value: %s, Encoding: %s'\n , name, value, encoding_function)\n self._encoding_errors.append({name: value})\n if value_range:\n try:\n vmin, vmax = value_range\n except ValueError as e:\n log.exception(\n '_encode_value must have exactly two values for tuple argument value_range'\n )\n raise ValueError(e)\n if encoding_function in [int, float]:\n if vmin and encoded_val < vmin:\n log.error(\n 'Particle value (%s) below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and encoded_val > vmax:\n log.error(\n 'Particle value (%s) exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n elif hasattr(encoded_val, '__len__'):\n try:\n if vmin and len(encoded_val) < vmin:\n log.error(\n 'Particle value (%s) length below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and len(encoded_val) > vmax:\n log.error(\n 'Particle value (%s) length exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n except TypeError:\n log.warning(\n '_encode_value received an encoding function (%s) that claimed to implement len() but does not. Unable to apply range test to %s'\n , encoding_function, name)\n return {DataParticleKey.VALUE_ID: name, DataParticleKey.VALUE:\n encoded_val}\n\n def get_encoding_errors(self):\n \"\"\"\n Return the encoding errors list\n \"\"\"\n return self._encoding_errors\n\n\nclass RawDataParticleKey(BaseEnum):\n PAYLOAD = 'raw'\n LENGTH = 'length'\n TYPE = 'type'\n CHECKSUM = 'checksum'\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\nclass CommonDataParticleType(BaseEnum):\n <docstring token>\n RAW = 'raw'\n\n\nclass DataParticleKey(BaseEnum):\n PKT_FORMAT_ID = 'pkt_format_id'\n PKT_VERSION = 'pkt_version'\n STREAM_NAME = 'stream_name'\n INTERNAL_TIMESTAMP = 'internal_timestamp'\n PORT_TIMESTAMP = 'port_timestamp'\n DRIVER_TIMESTAMP = 'driver_timestamp'\n PREFERRED_TIMESTAMP = 'preferred_timestamp'\n QUALITY_FLAG = 'quality_flag'\n VALUES = 'values'\n VALUE_ID = 'value_id'\n VALUE = 'value'\n BINARY = 'binary'\n NEW_SEQUENCE = 'new_sequence'\n\n\nclass DataParticleValue(BaseEnum):\n JSON_DATA = 'JSON_Data'\n ENG = 'eng'\n OK = 'ok'\n CHECKSUM_FAILED = 'checksum_failed'\n OUT_OF_RANGE = 'out_of_range'\n INVALID = 'invalid'\n QUESTIONABLE = 'questionable'\n\n\nclass DataParticle(object):\n \"\"\"\n This class is responsible for storing and ultimately generating data\n particles in the designated format from the associated inputs. It\n fills in fields as necessary, and is a valid Data Particle\n that can be sent up to the InstrumentAgent.\n\n It is the intent that this class is subclassed as needed if an instrument must\n modify fields in the outgoing packet. The hope is to have most of the superclass\n code be called by the child class with just values overridden as needed.\n \"\"\"\n _data_particle_type = None\n\n def __init__(self, raw_data, port_timestamp=None, internal_timestamp=\n None, preferred_timestamp=None, quality_flag=DataParticleValue.OK,\n new_sequence=None):\n \"\"\" Build a particle seeded with appropriate information\n\n @param raw_data The raw data used in the particle\n \"\"\"\n if new_sequence is not None and not isinstance(new_sequence, bool):\n raise TypeError('new_sequence is not a bool')\n self.contents = {DataParticleKey.PKT_FORMAT_ID: DataParticleValue.\n JSON_DATA, DataParticleKey.PKT_VERSION: 1, DataParticleKey.\n PORT_TIMESTAMP: port_timestamp, DataParticleKey.\n INTERNAL_TIMESTAMP: internal_timestamp, DataParticleKey.\n DRIVER_TIMESTAMP: ntplib.system_to_ntp_time(time.time()),\n DataParticleKey.PREFERRED_TIMESTAMP: preferred_timestamp,\n DataParticleKey.QUALITY_FLAG: quality_flag}\n self._encoding_errors = []\n if new_sequence is not None:\n self.contents[DataParticleKey.NEW_SEQUENCE] = new_sequence\n self.raw_data = raw_data\n self._values = None\n\n def __eq__(self, arg):\n \"\"\"\n Quick equality check for testing purposes. 
If they have the same raw\n data, timestamp, they are the same enough for this particle\n \"\"\"\n allowed_diff = 1e-06\n if self._data_particle_type != arg._data_particle_type:\n log.debug('Data particle type does not match: %s %s', self.\n _data_particle_type, arg._data_particle_type)\n return False\n if self.raw_data != arg.raw_data:\n log.debug('Raw data does not match')\n return False\n t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n if t1 is None or t2 is None:\n tdiff = allowed_diff\n else:\n tdiff = abs(t1 - t2)\n if tdiff > allowed_diff:\n log.debug('Timestamp %s does not match %s', t1, t2)\n return False\n generated1 = json.loads(self.generate())\n generated2 = json.loads(arg.generate())\n missing, differing = self._compare(generated1, generated2,\n ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP, DataParticleKey.\n PREFERRED_TIMESTAMP])\n if missing:\n log.error('Key mismatch between particle dictionaries: %r', missing\n )\n return False\n if differing:\n log.error('Value mismatch between particle dictionaries: %r',\n differing)\n return True\n\n @staticmethod\n def _compare(d1, d2, ignore_keys=None):\n ignore_keys = ignore_keys if ignore_keys else []\n missing = set(d1).symmetric_difference(d2)\n differing = {}\n for k in d1:\n if k in ignore_keys or k in missing:\n continue\n if d1[k] != d2[k]:\n differing[k] = d1[k], d2[k]\n return missing, differing\n\n def set_internal_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the internal timestamp\n @param timestamp: NTP timestamp to set\n @param unit_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)\n\n def set_port_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the port timestamp\n @param timestamp: NTP timestamp to set\n @param unix_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n if not self._check_timestamp(timestamp):\n raise InstrumentParameterException('invalid timestamp')\n self.contents[DataParticleKey.PORT_TIMESTAMP] = float(timestamp)\n\n def set_value(self, id, value):\n \"\"\"\n Set a content value, restricted as necessary\n\n @param id The ID of the value to set, should be from DataParticleKey\n @param value The value to set\n @raises ReadOnlyException If the parameter cannot be set\n \"\"\"\n if id == DataParticleKey.INTERNAL_TIMESTAMP and self._check_timestamp(\n value):\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value\n else:\n raise ReadOnlyException(\n 'Parameter %s not able to be set to %s after object creation!'\n % (id, value))\n\n def get_value(self, id):\n \"\"\" Return a stored value from contents\n\n @param id The ID (from DataParticleKey) for the parameter to return\n @raises NotImplementedException If there is an invalid id\n \"\"\"\n if DataParticleKey.has(id):\n return self.contents[id]\n else:\n raise NotImplementedException('Value %s not available in particle!'\n , 
id)\n\n def get_value_from_values(self, value_id):\n \"\"\" Return a stored value from values list\n\n @param value_id The ID of the parameter to return\n \"\"\"\n if not self._values:\n return None\n values = [i for i in self._values if i[DataParticleKey.VALUE_ID] ==\n value_id]\n if not values:\n return None\n return values[0][DataParticleKey.VALUE]\n\n def data_particle_type(self):\n \"\"\"\n Return the data particle type (aka stream name)\n @raise: NotImplementedException if _data_particle_type is not set\n \"\"\"\n if self._data_particle_type is None:\n raise NotImplementedException('_data_particle_type not initialized'\n )\n return self._data_particle_type\n\n def generate_dict(self):\n \"\"\"\n Generate a simple dictionary of sensor data and timestamps, without\n going to JSON. This is useful for the times when JSON is not needed to\n go across an interface. There are times when particles are used\n internally to a component/process/module/etc.\n @retval A python dictionary with the proper timestamps and data values\n @throws InstrumentDriverException if there is a problem wtih the inputs\n \"\"\"\n if not self._check_preferred_timestamps():\n raise SampleException('Preferred timestamp not in particle!')\n self._encoding_errors = []\n if self._values is None:\n self._values = self._build_parsed_values()\n result = self._build_base_structure()\n result[DataParticleKey.STREAM_NAME] = self.data_particle_type()\n result[DataParticleKey.VALUES] = self._values\n return result\n\n def generate(self, sorted=False):\n \"\"\"\n Generates a JSON_parsed packet from a sample dictionary of sensor data and\n associates a timestamp with it\n\n @param sorted Returned sorted json dict, useful for testing, but slow,\n so dont do it unless it is important\n @return A JSON_raw string, properly structured with port agent time stamp\n and driver timestamp\n @throws InstrumentDriverException If there is a problem with the inputs\n \"\"\"\n json_result = json.dumps(self.generate_dict(), sort_keys=sorted)\n return json_result\n\n def _build_parsed_values(self):\n \"\"\"\n Build values of a parsed structure. 
Just the values are built so\n so that a child class can override this class, but call it with\n super() to get the base structure before modification\n\n @return the values tag for this data structure ready to JSONify\n @raises SampleException when parsed values can not be properly returned\n \"\"\"\n raise SampleException('Parsed values block not overridden')\n\n def _build_base_structure(self):\n \"\"\"\n Build the base/header information for an output structure.\n Follow on methods can then modify it by adding or editing values.\n\n @return A fresh copy of a core structure to be exported\n \"\"\"\n result = dict(self.contents)\n if not self.contents[DataParticleKey.PORT_TIMESTAMP]:\n del result[DataParticleKey.PORT_TIMESTAMP]\n if not self.contents[DataParticleKey.INTERNAL_TIMESTAMP]:\n del result[DataParticleKey.INTERNAL_TIMESTAMP]\n return result\n\n def _check_timestamp(self, timestamp):\n \"\"\"\n Check to make sure the timestamp is reasonable\n\n @param timestamp An NTP4 formatted timestamp (64bit)\n @return True if timestamp is okay or None, False otherwise\n \"\"\"\n if timestamp is None:\n return True\n if not isinstance(timestamp, float):\n return False\n if timestamp > ntplib.system_to_ntp_time(time.time() + 86400 * 365):\n return False\n else:\n return True\n\n def _check_preferred_timestamps(self):\n \"\"\"\n Check to make sure the preferred timestamp indicated in the\n particle is actually listed, possibly adjusting to 2nd best\n if not there.\n\n @throws SampleException When there is a problem with the preferred\n timestamp in the sample.\n \"\"\"\n if self.contents[DataParticleKey.PREFERRED_TIMESTAMP] is None:\n raise SampleException(\n 'Missing preferred timestamp, %s, in particle' % self.\n contents[DataParticleKey.PREFERRED_TIMESTAMP])\n return True\n\n def _encode_value(self, name, value, encoding_function, value_range=None):\n \"\"\"\n Encode a value using the encoding function, if it fails store the error in a queue\n\n :param value_range tuple containing min/max numerical values or min/max lengths\n \"\"\"\n encoded_val = None\n try:\n encoded_val = encoding_function(value)\n except ValueError as e:\n log.error('Unable to convert %s to %s.', encoded_val,\n encoding_function)\n self._encoding_errors.append({name: value})\n except Exception as e:\n log.error(\n 'Data particle error encoding. 
Name: %s Value: %s, Encoding: %s'\n , name, value, encoding_function)\n self._encoding_errors.append({name: value})\n if value_range:\n try:\n vmin, vmax = value_range\n except ValueError as e:\n log.exception(\n '_encode_value must have exactly two values for tuple argument value_range'\n )\n raise ValueError(e)\n if encoding_function in [int, float]:\n if vmin and encoded_val < vmin:\n log.error(\n 'Particle value (%s) below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and encoded_val > vmax:\n log.error(\n 'Particle value (%s) exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n elif hasattr(encoded_val, '__len__'):\n try:\n if vmin and len(encoded_val) < vmin:\n log.error(\n 'Particle value (%s) length below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and len(encoded_val) > vmax:\n log.error(\n 'Particle value (%s) length exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n except TypeError:\n log.warning(\n '_encode_value received an encoding function (%s) that claimed to implement len() but does not. Unable to apply range test to %s'\n , encoding_function, name)\n return {DataParticleKey.VALUE_ID: name, DataParticleKey.VALUE:\n encoded_val}\n\n def get_encoding_errors(self):\n \"\"\"\n Return the encoding errors list\n \"\"\"\n return self._encoding_errors\n\n\nclass RawDataParticleKey(BaseEnum):\n PAYLOAD = 'raw'\n LENGTH = 'length'\n TYPE = 'type'\n CHECKSUM = 'checksum'\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\nclass CommonDataParticleType(BaseEnum):\n <docstring token>\n <assignment token>\n\n\nclass DataParticleKey(BaseEnum):\n PKT_FORMAT_ID = 'pkt_format_id'\n PKT_VERSION = 'pkt_version'\n STREAM_NAME = 'stream_name'\n INTERNAL_TIMESTAMP = 'internal_timestamp'\n PORT_TIMESTAMP = 'port_timestamp'\n DRIVER_TIMESTAMP = 'driver_timestamp'\n PREFERRED_TIMESTAMP = 'preferred_timestamp'\n QUALITY_FLAG = 'quality_flag'\n VALUES = 'values'\n VALUE_ID = 'value_id'\n VALUE = 'value'\n BINARY = 'binary'\n NEW_SEQUENCE = 'new_sequence'\n\n\nclass DataParticleValue(BaseEnum):\n JSON_DATA = 'JSON_Data'\n ENG = 'eng'\n OK = 'ok'\n CHECKSUM_FAILED = 'checksum_failed'\n OUT_OF_RANGE = 'out_of_range'\n INVALID = 'invalid'\n QUESTIONABLE = 'questionable'\n\n\nclass DataParticle(object):\n \"\"\"\n This class is responsible for storing and ultimately generating data\n particles in the designated format from the associated inputs. It\n fills in fields as necessary, and is a valid Data Particle\n that can be sent up to the InstrumentAgent.\n\n It is the intent that this class is subclassed as needed if an instrument must\n modify fields in the outgoing packet. The hope is to have most of the superclass\n code be called by the child class with just values overridden as needed.\n \"\"\"\n _data_particle_type = None\n\n def __init__(self, raw_data, port_timestamp=None, internal_timestamp=\n None, preferred_timestamp=None, quality_flag=DataParticleValue.OK,\n new_sequence=None):\n \"\"\" Build a particle seeded with appropriate information\n\n @param raw_data The raw data used in the particle\n \"\"\"\n if new_sequence is not None and not isinstance(new_sequence, bool):\n raise TypeError('new_sequence is not a bool')\n self.contents = {DataParticleKey.PKT_FORMAT_ID: DataParticleValue.\n JSON_DATA, DataParticleKey.PKT_VERSION: 1, DataParticleKey.\n PORT_TIMESTAMP: port_timestamp, DataParticleKey.\n INTERNAL_TIMESTAMP: internal_timestamp, DataParticleKey.\n DRIVER_TIMESTAMP: ntplib.system_to_ntp_time(time.time()),\n DataParticleKey.PREFERRED_TIMESTAMP: preferred_timestamp,\n DataParticleKey.QUALITY_FLAG: quality_flag}\n self._encoding_errors = []\n if new_sequence is not None:\n self.contents[DataParticleKey.NEW_SEQUENCE] = new_sequence\n self.raw_data = raw_data\n self._values = None\n\n def __eq__(self, arg):\n \"\"\"\n Quick equality check for testing purposes. 
If they have the same raw\n data, timestamp, they are the same enough for this particle\n \"\"\"\n allowed_diff = 1e-06\n if self._data_particle_type != arg._data_particle_type:\n log.debug('Data particle type does not match: %s %s', self.\n _data_particle_type, arg._data_particle_type)\n return False\n if self.raw_data != arg.raw_data:\n log.debug('Raw data does not match')\n return False\n t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n if t1 is None or t2 is None:\n tdiff = allowed_diff\n else:\n tdiff = abs(t1 - t2)\n if tdiff > allowed_diff:\n log.debug('Timestamp %s does not match %s', t1, t2)\n return False\n generated1 = json.loads(self.generate())\n generated2 = json.loads(arg.generate())\n missing, differing = self._compare(generated1, generated2,\n ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP, DataParticleKey.\n PREFERRED_TIMESTAMP])\n if missing:\n log.error('Key mismatch between particle dictionaries: %r', missing\n )\n return False\n if differing:\n log.error('Value mismatch between particle dictionaries: %r',\n differing)\n return True\n\n @staticmethod\n def _compare(d1, d2, ignore_keys=None):\n ignore_keys = ignore_keys if ignore_keys else []\n missing = set(d1).symmetric_difference(d2)\n differing = {}\n for k in d1:\n if k in ignore_keys or k in missing:\n continue\n if d1[k] != d2[k]:\n differing[k] = d1[k], d2[k]\n return missing, differing\n\n def set_internal_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the internal timestamp\n @param timestamp: NTP timestamp to set\n @param unit_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)\n\n def set_port_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the port timestamp\n @param timestamp: NTP timestamp to set\n @param unix_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n if not self._check_timestamp(timestamp):\n raise InstrumentParameterException('invalid timestamp')\n self.contents[DataParticleKey.PORT_TIMESTAMP] = float(timestamp)\n\n def set_value(self, id, value):\n \"\"\"\n Set a content value, restricted as necessary\n\n @param id The ID of the value to set, should be from DataParticleKey\n @param value The value to set\n @raises ReadOnlyException If the parameter cannot be set\n \"\"\"\n if id == DataParticleKey.INTERNAL_TIMESTAMP and self._check_timestamp(\n value):\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value\n else:\n raise ReadOnlyException(\n 'Parameter %s not able to be set to %s after object creation!'\n % (id, value))\n\n def get_value(self, id):\n \"\"\" Return a stored value from contents\n\n @param id The ID (from DataParticleKey) for the parameter to return\n @raises NotImplementedException If there is an invalid id\n \"\"\"\n if DataParticleKey.has(id):\n return self.contents[id]\n else:\n raise NotImplementedException('Value %s not available in particle!'\n , 
id)\n\n def get_value_from_values(self, value_id):\n \"\"\" Return a stored value from values list\n\n @param value_id The ID of the parameter to return\n \"\"\"\n if not self._values:\n return None\n values = [i for i in self._values if i[DataParticleKey.VALUE_ID] ==\n value_id]\n if not values:\n return None\n return values[0][DataParticleKey.VALUE]\n\n def data_particle_type(self):\n \"\"\"\n Return the data particle type (aka stream name)\n @raise: NotImplementedException if _data_particle_type is not set\n \"\"\"\n if self._data_particle_type is None:\n raise NotImplementedException('_data_particle_type not initialized'\n )\n return self._data_particle_type\n\n def generate_dict(self):\n \"\"\"\n Generate a simple dictionary of sensor data and timestamps, without\n going to JSON. This is useful for the times when JSON is not needed to\n go across an interface. There are times when particles are used\n internally to a component/process/module/etc.\n @retval A python dictionary with the proper timestamps and data values\n @throws InstrumentDriverException if there is a problem wtih the inputs\n \"\"\"\n if not self._check_preferred_timestamps():\n raise SampleException('Preferred timestamp not in particle!')\n self._encoding_errors = []\n if self._values is None:\n self._values = self._build_parsed_values()\n result = self._build_base_structure()\n result[DataParticleKey.STREAM_NAME] = self.data_particle_type()\n result[DataParticleKey.VALUES] = self._values\n return result\n\n def generate(self, sorted=False):\n \"\"\"\n Generates a JSON_parsed packet from a sample dictionary of sensor data and\n associates a timestamp with it\n\n @param sorted Returned sorted json dict, useful for testing, but slow,\n so dont do it unless it is important\n @return A JSON_raw string, properly structured with port agent time stamp\n and driver timestamp\n @throws InstrumentDriverException If there is a problem with the inputs\n \"\"\"\n json_result = json.dumps(self.generate_dict(), sort_keys=sorted)\n return json_result\n\n def _build_parsed_values(self):\n \"\"\"\n Build values of a parsed structure. 
Just the values are built so\n so that a child class can override this class, but call it with\n super() to get the base structure before modification\n\n @return the values tag for this data structure ready to JSONify\n @raises SampleException when parsed values can not be properly returned\n \"\"\"\n raise SampleException('Parsed values block not overridden')\n\n def _build_base_structure(self):\n \"\"\"\n Build the base/header information for an output structure.\n Follow on methods can then modify it by adding or editing values.\n\n @return A fresh copy of a core structure to be exported\n \"\"\"\n result = dict(self.contents)\n if not self.contents[DataParticleKey.PORT_TIMESTAMP]:\n del result[DataParticleKey.PORT_TIMESTAMP]\n if not self.contents[DataParticleKey.INTERNAL_TIMESTAMP]:\n del result[DataParticleKey.INTERNAL_TIMESTAMP]\n return result\n\n def _check_timestamp(self, timestamp):\n \"\"\"\n Check to make sure the timestamp is reasonable\n\n @param timestamp An NTP4 formatted timestamp (64bit)\n @return True if timestamp is okay or None, False otherwise\n \"\"\"\n if timestamp is None:\n return True\n if not isinstance(timestamp, float):\n return False\n if timestamp > ntplib.system_to_ntp_time(time.time() + 86400 * 365):\n return False\n else:\n return True\n\n def _check_preferred_timestamps(self):\n \"\"\"\n Check to make sure the preferred timestamp indicated in the\n particle is actually listed, possibly adjusting to 2nd best\n if not there.\n\n @throws SampleException When there is a problem with the preferred\n timestamp in the sample.\n \"\"\"\n if self.contents[DataParticleKey.PREFERRED_TIMESTAMP] is None:\n raise SampleException(\n 'Missing preferred timestamp, %s, in particle' % self.\n contents[DataParticleKey.PREFERRED_TIMESTAMP])\n return True\n\n def _encode_value(self, name, value, encoding_function, value_range=None):\n \"\"\"\n Encode a value using the encoding function, if it fails store the error in a queue\n\n :param value_range tuple containing min/max numerical values or min/max lengths\n \"\"\"\n encoded_val = None\n try:\n encoded_val = encoding_function(value)\n except ValueError as e:\n log.error('Unable to convert %s to %s.', encoded_val,\n encoding_function)\n self._encoding_errors.append({name: value})\n except Exception as e:\n log.error(\n 'Data particle error encoding. 
Name: %s Value: %s, Encoding: %s'\n , name, value, encoding_function)\n self._encoding_errors.append({name: value})\n if value_range:\n try:\n vmin, vmax = value_range\n except ValueError as e:\n log.exception(\n '_encode_value must have exactly two values for tuple argument value_range'\n )\n raise ValueError(e)\n if encoding_function in [int, float]:\n if vmin and encoded_val < vmin:\n log.error(\n 'Particle value (%s) below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and encoded_val > vmax:\n log.error(\n 'Particle value (%s) exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n elif hasattr(encoded_val, '__len__'):\n try:\n if vmin and len(encoded_val) < vmin:\n log.error(\n 'Particle value (%s) length below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and len(encoded_val) > vmax:\n log.error(\n 'Particle value (%s) length exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n except TypeError:\n log.warning(\n '_encode_value received an encoding function (%s) that claimed to implement len() but does not. Unable to apply range test to %s'\n , encoding_function, name)\n return {DataParticleKey.VALUE_ID: name, DataParticleKey.VALUE:\n encoded_val}\n\n def get_encoding_errors(self):\n \"\"\"\n Return the encoding errors list\n \"\"\"\n return self._encoding_errors\n\n\nclass RawDataParticleKey(BaseEnum):\n PAYLOAD = 'raw'\n LENGTH = 'length'\n TYPE = 'type'\n CHECKSUM = 'checksum'\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass DataParticleKey(BaseEnum):\n PKT_FORMAT_ID = 'pkt_format_id'\n PKT_VERSION = 'pkt_version'\n STREAM_NAME = 'stream_name'\n INTERNAL_TIMESTAMP = 'internal_timestamp'\n PORT_TIMESTAMP = 'port_timestamp'\n DRIVER_TIMESTAMP = 'driver_timestamp'\n PREFERRED_TIMESTAMP = 'preferred_timestamp'\n QUALITY_FLAG = 'quality_flag'\n VALUES = 'values'\n VALUE_ID = 'value_id'\n VALUE = 'value'\n BINARY = 'binary'\n NEW_SEQUENCE = 'new_sequence'\n\n\nclass DataParticleValue(BaseEnum):\n JSON_DATA = 'JSON_Data'\n ENG = 'eng'\n OK = 'ok'\n CHECKSUM_FAILED = 'checksum_failed'\n OUT_OF_RANGE = 'out_of_range'\n INVALID = 'invalid'\n QUESTIONABLE = 'questionable'\n\n\nclass DataParticle(object):\n \"\"\"\n This class is responsible for storing and ultimately generating data\n particles in the designated format from the associated inputs. It\n fills in fields as necessary, and is a valid Data Particle\n that can be sent up to the InstrumentAgent.\n\n It is the intent that this class is subclassed as needed if an instrument must\n modify fields in the outgoing packet. The hope is to have most of the superclass\n code be called by the child class with just values overridden as needed.\n \"\"\"\n _data_particle_type = None\n\n def __init__(self, raw_data, port_timestamp=None, internal_timestamp=\n None, preferred_timestamp=None, quality_flag=DataParticleValue.OK,\n new_sequence=None):\n \"\"\" Build a particle seeded with appropriate information\n\n @param raw_data The raw data used in the particle\n \"\"\"\n if new_sequence is not None and not isinstance(new_sequence, bool):\n raise TypeError('new_sequence is not a bool')\n self.contents = {DataParticleKey.PKT_FORMAT_ID: DataParticleValue.\n JSON_DATA, DataParticleKey.PKT_VERSION: 1, DataParticleKey.\n PORT_TIMESTAMP: port_timestamp, DataParticleKey.\n INTERNAL_TIMESTAMP: internal_timestamp, DataParticleKey.\n DRIVER_TIMESTAMP: ntplib.system_to_ntp_time(time.time()),\n DataParticleKey.PREFERRED_TIMESTAMP: preferred_timestamp,\n DataParticleKey.QUALITY_FLAG: quality_flag}\n self._encoding_errors = []\n if new_sequence is not None:\n self.contents[DataParticleKey.NEW_SEQUENCE] = new_sequence\n self.raw_data = raw_data\n self._values = None\n\n def __eq__(self, arg):\n \"\"\"\n Quick equality check for testing purposes. 
If they have the same raw\n data, timestamp, they are the same enough for this particle\n \"\"\"\n allowed_diff = 1e-06\n if self._data_particle_type != arg._data_particle_type:\n log.debug('Data particle type does not match: %s %s', self.\n _data_particle_type, arg._data_particle_type)\n return False\n if self.raw_data != arg.raw_data:\n log.debug('Raw data does not match')\n return False\n t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n if t1 is None or t2 is None:\n tdiff = allowed_diff\n else:\n tdiff = abs(t1 - t2)\n if tdiff > allowed_diff:\n log.debug('Timestamp %s does not match %s', t1, t2)\n return False\n generated1 = json.loads(self.generate())\n generated2 = json.loads(arg.generate())\n missing, differing = self._compare(generated1, generated2,\n ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP, DataParticleKey.\n PREFERRED_TIMESTAMP])\n if missing:\n log.error('Key mismatch between particle dictionaries: %r', missing\n )\n return False\n if differing:\n log.error('Value mismatch between particle dictionaries: %r',\n differing)\n return True\n\n @staticmethod\n def _compare(d1, d2, ignore_keys=None):\n ignore_keys = ignore_keys if ignore_keys else []\n missing = set(d1).symmetric_difference(d2)\n differing = {}\n for k in d1:\n if k in ignore_keys or k in missing:\n continue\n if d1[k] != d2[k]:\n differing[k] = d1[k], d2[k]\n return missing, differing\n\n def set_internal_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the internal timestamp\n @param timestamp: NTP timestamp to set\n @param unit_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)\n\n def set_port_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the port timestamp\n @param timestamp: NTP timestamp to set\n @param unix_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n if not self._check_timestamp(timestamp):\n raise InstrumentParameterException('invalid timestamp')\n self.contents[DataParticleKey.PORT_TIMESTAMP] = float(timestamp)\n\n def set_value(self, id, value):\n \"\"\"\n Set a content value, restricted as necessary\n\n @param id The ID of the value to set, should be from DataParticleKey\n @param value The value to set\n @raises ReadOnlyException If the parameter cannot be set\n \"\"\"\n if id == DataParticleKey.INTERNAL_TIMESTAMP and self._check_timestamp(\n value):\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value\n else:\n raise ReadOnlyException(\n 'Parameter %s not able to be set to %s after object creation!'\n % (id, value))\n\n def get_value(self, id):\n \"\"\" Return a stored value from contents\n\n @param id The ID (from DataParticleKey) for the parameter to return\n @raises NotImplementedException If there is an invalid id\n \"\"\"\n if DataParticleKey.has(id):\n return self.contents[id]\n else:\n raise NotImplementedException('Value %s not available in particle!'\n , 
id)\n\n def get_value_from_values(self, value_id):\n \"\"\" Return a stored value from values list\n\n @param value_id The ID of the parameter to return\n \"\"\"\n if not self._values:\n return None\n values = [i for i in self._values if i[DataParticleKey.VALUE_ID] ==\n value_id]\n if not values:\n return None\n return values[0][DataParticleKey.VALUE]\n\n def data_particle_type(self):\n \"\"\"\n Return the data particle type (aka stream name)\n @raise: NotImplementedException if _data_particle_type is not set\n \"\"\"\n if self._data_particle_type is None:\n raise NotImplementedException('_data_particle_type not initialized'\n )\n return self._data_particle_type\n\n def generate_dict(self):\n \"\"\"\n Generate a simple dictionary of sensor data and timestamps, without\n going to JSON. This is useful for the times when JSON is not needed to\n go across an interface. There are times when particles are used\n internally to a component/process/module/etc.\n @retval A python dictionary with the proper timestamps and data values\n @throws InstrumentDriverException if there is a problem wtih the inputs\n \"\"\"\n if not self._check_preferred_timestamps():\n raise SampleException('Preferred timestamp not in particle!')\n self._encoding_errors = []\n if self._values is None:\n self._values = self._build_parsed_values()\n result = self._build_base_structure()\n result[DataParticleKey.STREAM_NAME] = self.data_particle_type()\n result[DataParticleKey.VALUES] = self._values\n return result\n\n def generate(self, sorted=False):\n \"\"\"\n Generates a JSON_parsed packet from a sample dictionary of sensor data and\n associates a timestamp with it\n\n @param sorted Returned sorted json dict, useful for testing, but slow,\n so dont do it unless it is important\n @return A JSON_raw string, properly structured with port agent time stamp\n and driver timestamp\n @throws InstrumentDriverException If there is a problem with the inputs\n \"\"\"\n json_result = json.dumps(self.generate_dict(), sort_keys=sorted)\n return json_result\n\n def _build_parsed_values(self):\n \"\"\"\n Build values of a parsed structure. 
Just the values are built so\n so that a child class can override this class, but call it with\n super() to get the base structure before modification\n\n @return the values tag for this data structure ready to JSONify\n @raises SampleException when parsed values can not be properly returned\n \"\"\"\n raise SampleException('Parsed values block not overridden')\n\n def _build_base_structure(self):\n \"\"\"\n Build the base/header information for an output structure.\n Follow on methods can then modify it by adding or editing values.\n\n @return A fresh copy of a core structure to be exported\n \"\"\"\n result = dict(self.contents)\n if not self.contents[DataParticleKey.PORT_TIMESTAMP]:\n del result[DataParticleKey.PORT_TIMESTAMP]\n if not self.contents[DataParticleKey.INTERNAL_TIMESTAMP]:\n del result[DataParticleKey.INTERNAL_TIMESTAMP]\n return result\n\n def _check_timestamp(self, timestamp):\n \"\"\"\n Check to make sure the timestamp is reasonable\n\n @param timestamp An NTP4 formatted timestamp (64bit)\n @return True if timestamp is okay or None, False otherwise\n \"\"\"\n if timestamp is None:\n return True\n if not isinstance(timestamp, float):\n return False\n if timestamp > ntplib.system_to_ntp_time(time.time() + 86400 * 365):\n return False\n else:\n return True\n\n def _check_preferred_timestamps(self):\n \"\"\"\n Check to make sure the preferred timestamp indicated in the\n particle is actually listed, possibly adjusting to 2nd best\n if not there.\n\n @throws SampleException When there is a problem with the preferred\n timestamp in the sample.\n \"\"\"\n if self.contents[DataParticleKey.PREFERRED_TIMESTAMP] is None:\n raise SampleException(\n 'Missing preferred timestamp, %s, in particle' % self.\n contents[DataParticleKey.PREFERRED_TIMESTAMP])\n return True\n\n def _encode_value(self, name, value, encoding_function, value_range=None):\n \"\"\"\n Encode a value using the encoding function, if it fails store the error in a queue\n\n :param value_range tuple containing min/max numerical values or min/max lengths\n \"\"\"\n encoded_val = None\n try:\n encoded_val = encoding_function(value)\n except ValueError as e:\n log.error('Unable to convert %s to %s.', encoded_val,\n encoding_function)\n self._encoding_errors.append({name: value})\n except Exception as e:\n log.error(\n 'Data particle error encoding. 
Name: %s Value: %s, Encoding: %s'\n , name, value, encoding_function)\n self._encoding_errors.append({name: value})\n if value_range:\n try:\n vmin, vmax = value_range\n except ValueError as e:\n log.exception(\n '_encode_value must have exactly two values for tuple argument value_range'\n )\n raise ValueError(e)\n if encoding_function in [int, float]:\n if vmin and encoded_val < vmin:\n log.error(\n 'Particle value (%s) below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and encoded_val > vmax:\n log.error(\n 'Particle value (%s) exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n elif hasattr(encoded_val, '__len__'):\n try:\n if vmin and len(encoded_val) < vmin:\n log.error(\n 'Particle value (%s) length below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and len(encoded_val) > vmax:\n log.error(\n 'Particle value (%s) length exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n except TypeError:\n log.warning(\n '_encode_value received an encoding function (%s) that claimed to implement len() but does not. Unable to apply range test to %s'\n , encoding_function, name)\n return {DataParticleKey.VALUE_ID: name, DataParticleKey.VALUE:\n encoded_val}\n\n def get_encoding_errors(self):\n \"\"\"\n Return the encoding errors list\n \"\"\"\n return self._encoding_errors\n\n\nclass RawDataParticleKey(BaseEnum):\n PAYLOAD = 'raw'\n LENGTH = 'length'\n TYPE = 'type'\n CHECKSUM = 'checksum'\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass DataParticleKey(BaseEnum):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass DataParticleValue(BaseEnum):\n JSON_DATA = 'JSON_Data'\n ENG = 'eng'\n OK = 'ok'\n CHECKSUM_FAILED = 'checksum_failed'\n OUT_OF_RANGE = 'out_of_range'\n INVALID = 'invalid'\n QUESTIONABLE = 'questionable'\n\n\nclass DataParticle(object):\n \"\"\"\n This class is responsible for storing and ultimately generating data\n particles in the designated format from the associated inputs. It\n fills in fields as necessary, and is a valid Data Particle\n that can be sent up to the InstrumentAgent.\n\n It is the intent that this class is subclassed as needed if an instrument must\n modify fields in the outgoing packet. The hope is to have most of the superclass\n code be called by the child class with just values overridden as needed.\n \"\"\"\n _data_particle_type = None\n\n def __init__(self, raw_data, port_timestamp=None, internal_timestamp=\n None, preferred_timestamp=None, quality_flag=DataParticleValue.OK,\n new_sequence=None):\n \"\"\" Build a particle seeded with appropriate information\n\n @param raw_data The raw data used in the particle\n \"\"\"\n if new_sequence is not None and not isinstance(new_sequence, bool):\n raise TypeError('new_sequence is not a bool')\n self.contents = {DataParticleKey.PKT_FORMAT_ID: DataParticleValue.\n JSON_DATA, DataParticleKey.PKT_VERSION: 1, DataParticleKey.\n PORT_TIMESTAMP: port_timestamp, DataParticleKey.\n INTERNAL_TIMESTAMP: internal_timestamp, DataParticleKey.\n DRIVER_TIMESTAMP: ntplib.system_to_ntp_time(time.time()),\n DataParticleKey.PREFERRED_TIMESTAMP: preferred_timestamp,\n DataParticleKey.QUALITY_FLAG: quality_flag}\n self._encoding_errors = []\n if new_sequence is not None:\n self.contents[DataParticleKey.NEW_SEQUENCE] = new_sequence\n self.raw_data = raw_data\n self._values = None\n\n def __eq__(self, arg):\n \"\"\"\n Quick equality check for testing purposes. 
If they have the same raw\n data, timestamp, they are the same enough for this particle\n \"\"\"\n allowed_diff = 1e-06\n if self._data_particle_type != arg._data_particle_type:\n log.debug('Data particle type does not match: %s %s', self.\n _data_particle_type, arg._data_particle_type)\n return False\n if self.raw_data != arg.raw_data:\n log.debug('Raw data does not match')\n return False\n t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n if t1 is None or t2 is None:\n tdiff = allowed_diff\n else:\n tdiff = abs(t1 - t2)\n if tdiff > allowed_diff:\n log.debug('Timestamp %s does not match %s', t1, t2)\n return False\n generated1 = json.loads(self.generate())\n generated2 = json.loads(arg.generate())\n missing, differing = self._compare(generated1, generated2,\n ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP, DataParticleKey.\n PREFERRED_TIMESTAMP])\n if missing:\n log.error('Key mismatch between particle dictionaries: %r', missing\n )\n return False\n if differing:\n log.error('Value mismatch between particle dictionaries: %r',\n differing)\n return True\n\n @staticmethod\n def _compare(d1, d2, ignore_keys=None):\n ignore_keys = ignore_keys if ignore_keys else []\n missing = set(d1).symmetric_difference(d2)\n differing = {}\n for k in d1:\n if k in ignore_keys or k in missing:\n continue\n if d1[k] != d2[k]:\n differing[k] = d1[k], d2[k]\n return missing, differing\n\n def set_internal_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the internal timestamp\n @param timestamp: NTP timestamp to set\n @param unit_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)\n\n def set_port_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the port timestamp\n @param timestamp: NTP timestamp to set\n @param unix_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n if not self._check_timestamp(timestamp):\n raise InstrumentParameterException('invalid timestamp')\n self.contents[DataParticleKey.PORT_TIMESTAMP] = float(timestamp)\n\n def set_value(self, id, value):\n \"\"\"\n Set a content value, restricted as necessary\n\n @param id The ID of the value to set, should be from DataParticleKey\n @param value The value to set\n @raises ReadOnlyException If the parameter cannot be set\n \"\"\"\n if id == DataParticleKey.INTERNAL_TIMESTAMP and self._check_timestamp(\n value):\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value\n else:\n raise ReadOnlyException(\n 'Parameter %s not able to be set to %s after object creation!'\n % (id, value))\n\n def get_value(self, id):\n \"\"\" Return a stored value from contents\n\n @param id The ID (from DataParticleKey) for the parameter to return\n @raises NotImplementedException If there is an invalid id\n \"\"\"\n if DataParticleKey.has(id):\n return self.contents[id]\n else:\n raise NotImplementedException('Value %s not available in particle!'\n , 
id)\n\n def get_value_from_values(self, value_id):\n \"\"\" Return a stored value from values list\n\n @param value_id The ID of the parameter to return\n \"\"\"\n if not self._values:\n return None\n values = [i for i in self._values if i[DataParticleKey.VALUE_ID] ==\n value_id]\n if not values:\n return None\n return values[0][DataParticleKey.VALUE]\n\n def data_particle_type(self):\n \"\"\"\n Return the data particle type (aka stream name)\n @raise: NotImplementedException if _data_particle_type is not set\n \"\"\"\n if self._data_particle_type is None:\n raise NotImplementedException('_data_particle_type not initialized'\n )\n return self._data_particle_type\n\n def generate_dict(self):\n \"\"\"\n Generate a simple dictionary of sensor data and timestamps, without\n going to JSON. This is useful for the times when JSON is not needed to\n go across an interface. There are times when particles are used\n internally to a component/process/module/etc.\n @retval A python dictionary with the proper timestamps and data values\n @throws InstrumentDriverException if there is a problem wtih the inputs\n \"\"\"\n if not self._check_preferred_timestamps():\n raise SampleException('Preferred timestamp not in particle!')\n self._encoding_errors = []\n if self._values is None:\n self._values = self._build_parsed_values()\n result = self._build_base_structure()\n result[DataParticleKey.STREAM_NAME] = self.data_particle_type()\n result[DataParticleKey.VALUES] = self._values\n return result\n\n def generate(self, sorted=False):\n \"\"\"\n Generates a JSON_parsed packet from a sample dictionary of sensor data and\n associates a timestamp with it\n\n @param sorted Returned sorted json dict, useful for testing, but slow,\n so dont do it unless it is important\n @return A JSON_raw string, properly structured with port agent time stamp\n and driver timestamp\n @throws InstrumentDriverException If there is a problem with the inputs\n \"\"\"\n json_result = json.dumps(self.generate_dict(), sort_keys=sorted)\n return json_result\n\n def _build_parsed_values(self):\n \"\"\"\n Build values of a parsed structure. 
Just the values are built so\n so that a child class can override this class, but call it with\n super() to get the base structure before modification\n\n @return the values tag for this data structure ready to JSONify\n @raises SampleException when parsed values can not be properly returned\n \"\"\"\n raise SampleException('Parsed values block not overridden')\n\n def _build_base_structure(self):\n \"\"\"\n Build the base/header information for an output structure.\n Follow on methods can then modify it by adding or editing values.\n\n @return A fresh copy of a core structure to be exported\n \"\"\"\n result = dict(self.contents)\n if not self.contents[DataParticleKey.PORT_TIMESTAMP]:\n del result[DataParticleKey.PORT_TIMESTAMP]\n if not self.contents[DataParticleKey.INTERNAL_TIMESTAMP]:\n del result[DataParticleKey.INTERNAL_TIMESTAMP]\n return result\n\n def _check_timestamp(self, timestamp):\n \"\"\"\n Check to make sure the timestamp is reasonable\n\n @param timestamp An NTP4 formatted timestamp (64bit)\n @return True if timestamp is okay or None, False otherwise\n \"\"\"\n if timestamp is None:\n return True\n if not isinstance(timestamp, float):\n return False\n if timestamp > ntplib.system_to_ntp_time(time.time() + 86400 * 365):\n return False\n else:\n return True\n\n def _check_preferred_timestamps(self):\n \"\"\"\n Check to make sure the preferred timestamp indicated in the\n particle is actually listed, possibly adjusting to 2nd best\n if not there.\n\n @throws SampleException When there is a problem with the preferred\n timestamp in the sample.\n \"\"\"\n if self.contents[DataParticleKey.PREFERRED_TIMESTAMP] is None:\n raise SampleException(\n 'Missing preferred timestamp, %s, in particle' % self.\n contents[DataParticleKey.PREFERRED_TIMESTAMP])\n return True\n\n def _encode_value(self, name, value, encoding_function, value_range=None):\n \"\"\"\n Encode a value using the encoding function, if it fails store the error in a queue\n\n :param value_range tuple containing min/max numerical values or min/max lengths\n \"\"\"\n encoded_val = None\n try:\n encoded_val = encoding_function(value)\n except ValueError as e:\n log.error('Unable to convert %s to %s.', encoded_val,\n encoding_function)\n self._encoding_errors.append({name: value})\n except Exception as e:\n log.error(\n 'Data particle error encoding. 
Name: %s Value: %s, Encoding: %s'\n , name, value, encoding_function)\n self._encoding_errors.append({name: value})\n if value_range:\n try:\n vmin, vmax = value_range\n except ValueError as e:\n log.exception(\n '_encode_value must have exactly two values for tuple argument value_range'\n )\n raise ValueError(e)\n if encoding_function in [int, float]:\n if vmin and encoded_val < vmin:\n log.error(\n 'Particle value (%s) below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and encoded_val > vmax:\n log.error(\n 'Particle value (%s) exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n elif hasattr(encoded_val, '__len__'):\n try:\n if vmin and len(encoded_val) < vmin:\n log.error(\n 'Particle value (%s) length below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and len(encoded_val) > vmax:\n log.error(\n 'Particle value (%s) length exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n except TypeError:\n log.warning(\n '_encode_value received an encoding function (%s) that claimed to implement len() but does not. Unable to apply range test to %s'\n , encoding_function, name)\n return {DataParticleKey.VALUE_ID: name, DataParticleKey.VALUE:\n encoded_val}\n\n def get_encoding_errors(self):\n \"\"\"\n Return the encoding errors list\n \"\"\"\n return self._encoding_errors\n\n\nclass RawDataParticleKey(BaseEnum):\n PAYLOAD = 'raw'\n LENGTH = 'length'\n TYPE = 'type'\n CHECKSUM = 'checksum'\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass DataParticleValue(BaseEnum):\n JSON_DATA = 'JSON_Data'\n ENG = 'eng'\n OK = 'ok'\n CHECKSUM_FAILED = 'checksum_failed'\n OUT_OF_RANGE = 'out_of_range'\n INVALID = 'invalid'\n QUESTIONABLE = 'questionable'\n\n\nclass DataParticle(object):\n \"\"\"\n This class is responsible for storing and ultimately generating data\n particles in the designated format from the associated inputs. It\n fills in fields as necessary, and is a valid Data Particle\n that can be sent up to the InstrumentAgent.\n\n It is the intent that this class is subclassed as needed if an instrument must\n modify fields in the outgoing packet. The hope is to have most of the superclass\n code be called by the child class with just values overridden as needed.\n \"\"\"\n _data_particle_type = None\n\n def __init__(self, raw_data, port_timestamp=None, internal_timestamp=\n None, preferred_timestamp=None, quality_flag=DataParticleValue.OK,\n new_sequence=None):\n \"\"\" Build a particle seeded with appropriate information\n\n @param raw_data The raw data used in the particle\n \"\"\"\n if new_sequence is not None and not isinstance(new_sequence, bool):\n raise TypeError('new_sequence is not a bool')\n self.contents = {DataParticleKey.PKT_FORMAT_ID: DataParticleValue.\n JSON_DATA, DataParticleKey.PKT_VERSION: 1, DataParticleKey.\n PORT_TIMESTAMP: port_timestamp, DataParticleKey.\n INTERNAL_TIMESTAMP: internal_timestamp, DataParticleKey.\n DRIVER_TIMESTAMP: ntplib.system_to_ntp_time(time.time()),\n DataParticleKey.PREFERRED_TIMESTAMP: preferred_timestamp,\n DataParticleKey.QUALITY_FLAG: quality_flag}\n self._encoding_errors = []\n if new_sequence is not None:\n self.contents[DataParticleKey.NEW_SEQUENCE] = new_sequence\n self.raw_data = raw_data\n self._values = None\n\n def __eq__(self, arg):\n \"\"\"\n Quick equality check for testing purposes. 
If they have the same raw\n data, timestamp, they are the same enough for this particle\n \"\"\"\n allowed_diff = 1e-06\n if self._data_particle_type != arg._data_particle_type:\n log.debug('Data particle type does not match: %s %s', self.\n _data_particle_type, arg._data_particle_type)\n return False\n if self.raw_data != arg.raw_data:\n log.debug('Raw data does not match')\n return False\n t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n if t1 is None or t2 is None:\n tdiff = allowed_diff\n else:\n tdiff = abs(t1 - t2)\n if tdiff > allowed_diff:\n log.debug('Timestamp %s does not match %s', t1, t2)\n return False\n generated1 = json.loads(self.generate())\n generated2 = json.loads(arg.generate())\n missing, differing = self._compare(generated1, generated2,\n ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP, DataParticleKey.\n PREFERRED_TIMESTAMP])\n if missing:\n log.error('Key mismatch between particle dictionaries: %r', missing\n )\n return False\n if differing:\n log.error('Value mismatch between particle dictionaries: %r',\n differing)\n return True\n\n @staticmethod\n def _compare(d1, d2, ignore_keys=None):\n ignore_keys = ignore_keys if ignore_keys else []\n missing = set(d1).symmetric_difference(d2)\n differing = {}\n for k in d1:\n if k in ignore_keys or k in missing:\n continue\n if d1[k] != d2[k]:\n differing[k] = d1[k], d2[k]\n return missing, differing\n\n def set_internal_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the internal timestamp\n @param timestamp: NTP timestamp to set\n @param unit_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)\n\n def set_port_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the port timestamp\n @param timestamp: NTP timestamp to set\n @param unix_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n if not self._check_timestamp(timestamp):\n raise InstrumentParameterException('invalid timestamp')\n self.contents[DataParticleKey.PORT_TIMESTAMP] = float(timestamp)\n\n def set_value(self, id, value):\n \"\"\"\n Set a content value, restricted as necessary\n\n @param id The ID of the value to set, should be from DataParticleKey\n @param value The value to set\n @raises ReadOnlyException If the parameter cannot be set\n \"\"\"\n if id == DataParticleKey.INTERNAL_TIMESTAMP and self._check_timestamp(\n value):\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value\n else:\n raise ReadOnlyException(\n 'Parameter %s not able to be set to %s after object creation!'\n % (id, value))\n\n def get_value(self, id):\n \"\"\" Return a stored value from contents\n\n @param id The ID (from DataParticleKey) for the parameter to return\n @raises NotImplementedException If there is an invalid id\n \"\"\"\n if DataParticleKey.has(id):\n return self.contents[id]\n else:\n raise NotImplementedException('Value %s not available in particle!'\n , 
id)\n\n def get_value_from_values(self, value_id):\n \"\"\" Return a stored value from values list\n\n @param value_id The ID of the parameter to return\n \"\"\"\n if not self._values:\n return None\n values = [i for i in self._values if i[DataParticleKey.VALUE_ID] ==\n value_id]\n if not values:\n return None\n return values[0][DataParticleKey.VALUE]\n\n def data_particle_type(self):\n \"\"\"\n Return the data particle type (aka stream name)\n @raise: NotImplementedException if _data_particle_type is not set\n \"\"\"\n if self._data_particle_type is None:\n raise NotImplementedException('_data_particle_type not initialized'\n )\n return self._data_particle_type\n\n def generate_dict(self):\n \"\"\"\n Generate a simple dictionary of sensor data and timestamps, without\n going to JSON. This is useful for the times when JSON is not needed to\n go across an interface. There are times when particles are used\n internally to a component/process/module/etc.\n @retval A python dictionary with the proper timestamps and data values\n @throws InstrumentDriverException if there is a problem wtih the inputs\n \"\"\"\n if not self._check_preferred_timestamps():\n raise SampleException('Preferred timestamp not in particle!')\n self._encoding_errors = []\n if self._values is None:\n self._values = self._build_parsed_values()\n result = self._build_base_structure()\n result[DataParticleKey.STREAM_NAME] = self.data_particle_type()\n result[DataParticleKey.VALUES] = self._values\n return result\n\n def generate(self, sorted=False):\n \"\"\"\n Generates a JSON_parsed packet from a sample dictionary of sensor data and\n associates a timestamp with it\n\n @param sorted Returned sorted json dict, useful for testing, but slow,\n so dont do it unless it is important\n @return A JSON_raw string, properly structured with port agent time stamp\n and driver timestamp\n @throws InstrumentDriverException If there is a problem with the inputs\n \"\"\"\n json_result = json.dumps(self.generate_dict(), sort_keys=sorted)\n return json_result\n\n def _build_parsed_values(self):\n \"\"\"\n Build values of a parsed structure. 
Just the values are built so\n so that a child class can override this class, but call it with\n super() to get the base structure before modification\n\n @return the values tag for this data structure ready to JSONify\n @raises SampleException when parsed values can not be properly returned\n \"\"\"\n raise SampleException('Parsed values block not overridden')\n\n def _build_base_structure(self):\n \"\"\"\n Build the base/header information for an output structure.\n Follow on methods can then modify it by adding or editing values.\n\n @return A fresh copy of a core structure to be exported\n \"\"\"\n result = dict(self.contents)\n if not self.contents[DataParticleKey.PORT_TIMESTAMP]:\n del result[DataParticleKey.PORT_TIMESTAMP]\n if not self.contents[DataParticleKey.INTERNAL_TIMESTAMP]:\n del result[DataParticleKey.INTERNAL_TIMESTAMP]\n return result\n\n def _check_timestamp(self, timestamp):\n \"\"\"\n Check to make sure the timestamp is reasonable\n\n @param timestamp An NTP4 formatted timestamp (64bit)\n @return True if timestamp is okay or None, False otherwise\n \"\"\"\n if timestamp is None:\n return True\n if not isinstance(timestamp, float):\n return False\n if timestamp > ntplib.system_to_ntp_time(time.time() + 86400 * 365):\n return False\n else:\n return True\n\n def _check_preferred_timestamps(self):\n \"\"\"\n Check to make sure the preferred timestamp indicated in the\n particle is actually listed, possibly adjusting to 2nd best\n if not there.\n\n @throws SampleException When there is a problem with the preferred\n timestamp in the sample.\n \"\"\"\n if self.contents[DataParticleKey.PREFERRED_TIMESTAMP] is None:\n raise SampleException(\n 'Missing preferred timestamp, %s, in particle' % self.\n contents[DataParticleKey.PREFERRED_TIMESTAMP])\n return True\n\n def _encode_value(self, name, value, encoding_function, value_range=None):\n \"\"\"\n Encode a value using the encoding function, if it fails store the error in a queue\n\n :param value_range tuple containing min/max numerical values or min/max lengths\n \"\"\"\n encoded_val = None\n try:\n encoded_val = encoding_function(value)\n except ValueError as e:\n log.error('Unable to convert %s to %s.', encoded_val,\n encoding_function)\n self._encoding_errors.append({name: value})\n except Exception as e:\n log.error(\n 'Data particle error encoding. 
Name: %s Value: %s, Encoding: %s'\n , name, value, encoding_function)\n self._encoding_errors.append({name: value})\n if value_range:\n try:\n vmin, vmax = value_range\n except ValueError as e:\n log.exception(\n '_encode_value must have exactly two values for tuple argument value_range'\n )\n raise ValueError(e)\n if encoding_function in [int, float]:\n if vmin and encoded_val < vmin:\n log.error(\n 'Particle value (%s) below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and encoded_val > vmax:\n log.error(\n 'Particle value (%s) exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n elif hasattr(encoded_val, '__len__'):\n try:\n if vmin and len(encoded_val) < vmin:\n log.error(\n 'Particle value (%s) length below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and len(encoded_val) > vmax:\n log.error(\n 'Particle value (%s) length exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n except TypeError:\n log.warning(\n '_encode_value received an encoding function (%s) that claimed to implement len() but does not. Unable to apply range test to %s'\n , encoding_function, name)\n return {DataParticleKey.VALUE_ID: name, DataParticleKey.VALUE:\n encoded_val}\n\n def get_encoding_errors(self):\n \"\"\"\n Return the encoding errors list\n \"\"\"\n return self._encoding_errors\n\n\nclass RawDataParticleKey(BaseEnum):\n PAYLOAD = 'raw'\n LENGTH = 'length'\n TYPE = 'type'\n CHECKSUM = 'checksum'\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass DataParticleValue(BaseEnum):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass DataParticle(object):\n \"\"\"\n This class is responsible for storing and ultimately generating data\n particles in the designated format from the associated inputs. It\n fills in fields as necessary, and is a valid Data Particle\n that can be sent up to the InstrumentAgent.\n\n It is the intent that this class is subclassed as needed if an instrument must\n modify fields in the outgoing packet. The hope is to have most of the superclass\n code be called by the child class with just values overridden as needed.\n \"\"\"\n _data_particle_type = None\n\n def __init__(self, raw_data, port_timestamp=None, internal_timestamp=\n None, preferred_timestamp=None, quality_flag=DataParticleValue.OK,\n new_sequence=None):\n \"\"\" Build a particle seeded with appropriate information\n\n @param raw_data The raw data used in the particle\n \"\"\"\n if new_sequence is not None and not isinstance(new_sequence, bool):\n raise TypeError('new_sequence is not a bool')\n self.contents = {DataParticleKey.PKT_FORMAT_ID: DataParticleValue.\n JSON_DATA, DataParticleKey.PKT_VERSION: 1, DataParticleKey.\n PORT_TIMESTAMP: port_timestamp, DataParticleKey.\n INTERNAL_TIMESTAMP: internal_timestamp, DataParticleKey.\n DRIVER_TIMESTAMP: ntplib.system_to_ntp_time(time.time()),\n DataParticleKey.PREFERRED_TIMESTAMP: preferred_timestamp,\n DataParticleKey.QUALITY_FLAG: quality_flag}\n self._encoding_errors = []\n if new_sequence is not None:\n self.contents[DataParticleKey.NEW_SEQUENCE] = new_sequence\n self.raw_data = raw_data\n self._values = None\n\n def __eq__(self, arg):\n \"\"\"\n Quick equality check for testing purposes. 
If they have the same raw\n data, timestamp, they are the same enough for this particle\n \"\"\"\n allowed_diff = 1e-06\n if self._data_particle_type != arg._data_particle_type:\n log.debug('Data particle type does not match: %s %s', self.\n _data_particle_type, arg._data_particle_type)\n return False\n if self.raw_data != arg.raw_data:\n log.debug('Raw data does not match')\n return False\n t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n if t1 is None or t2 is None:\n tdiff = allowed_diff\n else:\n tdiff = abs(t1 - t2)\n if tdiff > allowed_diff:\n log.debug('Timestamp %s does not match %s', t1, t2)\n return False\n generated1 = json.loads(self.generate())\n generated2 = json.loads(arg.generate())\n missing, differing = self._compare(generated1, generated2,\n ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP, DataParticleKey.\n PREFERRED_TIMESTAMP])\n if missing:\n log.error('Key mismatch between particle dictionaries: %r', missing\n )\n return False\n if differing:\n log.error('Value mismatch between particle dictionaries: %r',\n differing)\n return True\n\n @staticmethod\n def _compare(d1, d2, ignore_keys=None):\n ignore_keys = ignore_keys if ignore_keys else []\n missing = set(d1).symmetric_difference(d2)\n differing = {}\n for k in d1:\n if k in ignore_keys or k in missing:\n continue\n if d1[k] != d2[k]:\n differing[k] = d1[k], d2[k]\n return missing, differing\n\n def set_internal_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the internal timestamp\n @param timestamp: NTP timestamp to set\n @param unit_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)\n\n def set_port_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the port timestamp\n @param timestamp: NTP timestamp to set\n @param unix_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n if not self._check_timestamp(timestamp):\n raise InstrumentParameterException('invalid timestamp')\n self.contents[DataParticleKey.PORT_TIMESTAMP] = float(timestamp)\n\n def set_value(self, id, value):\n \"\"\"\n Set a content value, restricted as necessary\n\n @param id The ID of the value to set, should be from DataParticleKey\n @param value The value to set\n @raises ReadOnlyException If the parameter cannot be set\n \"\"\"\n if id == DataParticleKey.INTERNAL_TIMESTAMP and self._check_timestamp(\n value):\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value\n else:\n raise ReadOnlyException(\n 'Parameter %s not able to be set to %s after object creation!'\n % (id, value))\n\n def get_value(self, id):\n \"\"\" Return a stored value from contents\n\n @param id The ID (from DataParticleKey) for the parameter to return\n @raises NotImplementedException If there is an invalid id\n \"\"\"\n if DataParticleKey.has(id):\n return self.contents[id]\n else:\n raise NotImplementedException('Value %s not available in particle!'\n , 
id)\n\n def get_value_from_values(self, value_id):\n \"\"\" Return a stored value from values list\n\n @param value_id The ID of the parameter to return\n \"\"\"\n if not self._values:\n return None\n values = [i for i in self._values if i[DataParticleKey.VALUE_ID] ==\n value_id]\n if not values:\n return None\n return values[0][DataParticleKey.VALUE]\n\n def data_particle_type(self):\n \"\"\"\n Return the data particle type (aka stream name)\n @raise: NotImplementedException if _data_particle_type is not set\n \"\"\"\n if self._data_particle_type is None:\n raise NotImplementedException('_data_particle_type not initialized'\n )\n return self._data_particle_type\n\n def generate_dict(self):\n \"\"\"\n Generate a simple dictionary of sensor data and timestamps, without\n going to JSON. This is useful for the times when JSON is not needed to\n go across an interface. There are times when particles are used\n internally to a component/process/module/etc.\n @retval A python dictionary with the proper timestamps and data values\n @throws InstrumentDriverException if there is a problem wtih the inputs\n \"\"\"\n if not self._check_preferred_timestamps():\n raise SampleException('Preferred timestamp not in particle!')\n self._encoding_errors = []\n if self._values is None:\n self._values = self._build_parsed_values()\n result = self._build_base_structure()\n result[DataParticleKey.STREAM_NAME] = self.data_particle_type()\n result[DataParticleKey.VALUES] = self._values\n return result\n\n def generate(self, sorted=False):\n \"\"\"\n Generates a JSON_parsed packet from a sample dictionary of sensor data and\n associates a timestamp with it\n\n @param sorted Returned sorted json dict, useful for testing, but slow,\n so dont do it unless it is important\n @return A JSON_raw string, properly structured with port agent time stamp\n and driver timestamp\n @throws InstrumentDriverException If there is a problem with the inputs\n \"\"\"\n json_result = json.dumps(self.generate_dict(), sort_keys=sorted)\n return json_result\n\n def _build_parsed_values(self):\n \"\"\"\n Build values of a parsed structure. 
Just the values are built so\n so that a child class can override this class, but call it with\n super() to get the base structure before modification\n\n @return the values tag for this data structure ready to JSONify\n @raises SampleException when parsed values can not be properly returned\n \"\"\"\n raise SampleException('Parsed values block not overridden')\n\n def _build_base_structure(self):\n \"\"\"\n Build the base/header information for an output structure.\n Follow on methods can then modify it by adding or editing values.\n\n @return A fresh copy of a core structure to be exported\n \"\"\"\n result = dict(self.contents)\n if not self.contents[DataParticleKey.PORT_TIMESTAMP]:\n del result[DataParticleKey.PORT_TIMESTAMP]\n if not self.contents[DataParticleKey.INTERNAL_TIMESTAMP]:\n del result[DataParticleKey.INTERNAL_TIMESTAMP]\n return result\n\n def _check_timestamp(self, timestamp):\n \"\"\"\n Check to make sure the timestamp is reasonable\n\n @param timestamp An NTP4 formatted timestamp (64bit)\n @return True if timestamp is okay or None, False otherwise\n \"\"\"\n if timestamp is None:\n return True\n if not isinstance(timestamp, float):\n return False\n if timestamp > ntplib.system_to_ntp_time(time.time() + 86400 * 365):\n return False\n else:\n return True\n\n def _check_preferred_timestamps(self):\n \"\"\"\n Check to make sure the preferred timestamp indicated in the\n particle is actually listed, possibly adjusting to 2nd best\n if not there.\n\n @throws SampleException When there is a problem with the preferred\n timestamp in the sample.\n \"\"\"\n if self.contents[DataParticleKey.PREFERRED_TIMESTAMP] is None:\n raise SampleException(\n 'Missing preferred timestamp, %s, in particle' % self.\n contents[DataParticleKey.PREFERRED_TIMESTAMP])\n return True\n\n def _encode_value(self, name, value, encoding_function, value_range=None):\n \"\"\"\n Encode a value using the encoding function, if it fails store the error in a queue\n\n :param value_range tuple containing min/max numerical values or min/max lengths\n \"\"\"\n encoded_val = None\n try:\n encoded_val = encoding_function(value)\n except ValueError as e:\n log.error('Unable to convert %s to %s.', encoded_val,\n encoding_function)\n self._encoding_errors.append({name: value})\n except Exception as e:\n log.error(\n 'Data particle error encoding. 
Name: %s Value: %s, Encoding: %s'\n , name, value, encoding_function)\n self._encoding_errors.append({name: value})\n if value_range:\n try:\n vmin, vmax = value_range\n except ValueError as e:\n log.exception(\n '_encode_value must have exactly two values for tuple argument value_range'\n )\n raise ValueError(e)\n if encoding_function in [int, float]:\n if vmin and encoded_val < vmin:\n log.error(\n 'Particle value (%s) below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and encoded_val > vmax:\n log.error(\n 'Particle value (%s) exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n elif hasattr(encoded_val, '__len__'):\n try:\n if vmin and len(encoded_val) < vmin:\n log.error(\n 'Particle value (%s) length below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and len(encoded_val) > vmax:\n log.error(\n 'Particle value (%s) length exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n except TypeError:\n log.warning(\n '_encode_value received an encoding function (%s) that claimed to implement len() but does not. Unable to apply range test to %s'\n , encoding_function, name)\n return {DataParticleKey.VALUE_ID: name, DataParticleKey.VALUE:\n encoded_val}\n\n def get_encoding_errors(self):\n \"\"\"\n Return the encoding errors list\n \"\"\"\n return self._encoding_errors\n\n\nclass RawDataParticleKey(BaseEnum):\n PAYLOAD = 'raw'\n LENGTH = 'length'\n TYPE = 'type'\n CHECKSUM = 'checksum'\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass DataParticle(object):\n \"\"\"\n This class is responsible for storing and ultimately generating data\n particles in the designated format from the associated inputs. It\n fills in fields as necessary, and is a valid Data Particle\n that can be sent up to the InstrumentAgent.\n\n It is the intent that this class is subclassed as needed if an instrument must\n modify fields in the outgoing packet. The hope is to have most of the superclass\n code be called by the child class with just values overridden as needed.\n \"\"\"\n _data_particle_type = None\n\n def __init__(self, raw_data, port_timestamp=None, internal_timestamp=\n None, preferred_timestamp=None, quality_flag=DataParticleValue.OK,\n new_sequence=None):\n \"\"\" Build a particle seeded with appropriate information\n\n @param raw_data The raw data used in the particle\n \"\"\"\n if new_sequence is not None and not isinstance(new_sequence, bool):\n raise TypeError('new_sequence is not a bool')\n self.contents = {DataParticleKey.PKT_FORMAT_ID: DataParticleValue.\n JSON_DATA, DataParticleKey.PKT_VERSION: 1, DataParticleKey.\n PORT_TIMESTAMP: port_timestamp, DataParticleKey.\n INTERNAL_TIMESTAMP: internal_timestamp, DataParticleKey.\n DRIVER_TIMESTAMP: ntplib.system_to_ntp_time(time.time()),\n DataParticleKey.PREFERRED_TIMESTAMP: preferred_timestamp,\n DataParticleKey.QUALITY_FLAG: quality_flag}\n self._encoding_errors = []\n if new_sequence is not None:\n self.contents[DataParticleKey.NEW_SEQUENCE] = new_sequence\n self.raw_data = raw_data\n self._values = None\n\n def __eq__(self, arg):\n \"\"\"\n Quick equality check for testing purposes. If they have the same raw\n data, timestamp, they are the same enough for this particle\n \"\"\"\n allowed_diff = 1e-06\n if self._data_particle_type != arg._data_particle_type:\n log.debug('Data particle type does not match: %s %s', self.\n _data_particle_type, arg._data_particle_type)\n return False\n if self.raw_data != arg.raw_data:\n log.debug('Raw data does not match')\n return False\n t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n if t1 is None or t2 is None:\n tdiff = allowed_diff\n else:\n tdiff = abs(t1 - t2)\n if tdiff > allowed_diff:\n log.debug('Timestamp %s does not match %s', t1, t2)\n return False\n generated1 = json.loads(self.generate())\n generated2 = json.loads(arg.generate())\n missing, differing = self._compare(generated1, generated2,\n ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP, DataParticleKey.\n PREFERRED_TIMESTAMP])\n if missing:\n log.error('Key mismatch between particle dictionaries: %r', missing\n )\n return False\n if differing:\n log.error('Value mismatch between particle dictionaries: %r',\n differing)\n return True\n\n @staticmethod\n def _compare(d1, d2, ignore_keys=None):\n ignore_keys = ignore_keys if ignore_keys else []\n missing = set(d1).symmetric_difference(d2)\n differing = {}\n for k in d1:\n if k in ignore_keys or k in missing:\n continue\n if d1[k] != d2[k]:\n differing[k] = d1[k], d2[k]\n return missing, differing\n\n def set_internal_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the internal timestamp\n @param timestamp: NTP timestamp to set\n @param unit_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise 
InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)\n\n def set_port_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the port timestamp\n @param timestamp: NTP timestamp to set\n @param unix_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n if not self._check_timestamp(timestamp):\n raise InstrumentParameterException('invalid timestamp')\n self.contents[DataParticleKey.PORT_TIMESTAMP] = float(timestamp)\n\n def set_value(self, id, value):\n \"\"\"\n Set a content value, restricted as necessary\n\n @param id The ID of the value to set, should be from DataParticleKey\n @param value The value to set\n @raises ReadOnlyException If the parameter cannot be set\n \"\"\"\n if id == DataParticleKey.INTERNAL_TIMESTAMP and self._check_timestamp(\n value):\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value\n else:\n raise ReadOnlyException(\n 'Parameter %s not able to be set to %s after object creation!'\n % (id, value))\n\n def get_value(self, id):\n \"\"\" Return a stored value from contents\n\n @param id The ID (from DataParticleKey) for the parameter to return\n @raises NotImplementedException If there is an invalid id\n \"\"\"\n if DataParticleKey.has(id):\n return self.contents[id]\n else:\n raise NotImplementedException('Value %s not available in particle!'\n , id)\n\n def get_value_from_values(self, value_id):\n \"\"\" Return a stored value from values list\n\n @param value_id The ID of the parameter to return\n \"\"\"\n if not self._values:\n return None\n values = [i for i in self._values if i[DataParticleKey.VALUE_ID] ==\n value_id]\n if not values:\n return None\n return values[0][DataParticleKey.VALUE]\n\n def data_particle_type(self):\n \"\"\"\n Return the data particle type (aka stream name)\n @raise: NotImplementedException if _data_particle_type is not set\n \"\"\"\n if self._data_particle_type is None:\n raise NotImplementedException('_data_particle_type not initialized'\n )\n return self._data_particle_type\n\n def generate_dict(self):\n \"\"\"\n Generate a simple dictionary of sensor data and timestamps, without\n going to JSON. This is useful for the times when JSON is not needed to\n go across an interface. 
There are times when particles are used\n internally to a component/process/module/etc.\n @retval A python dictionary with the proper timestamps and data values\n @throws InstrumentDriverException if there is a problem wtih the inputs\n \"\"\"\n if not self._check_preferred_timestamps():\n raise SampleException('Preferred timestamp not in particle!')\n self._encoding_errors = []\n if self._values is None:\n self._values = self._build_parsed_values()\n result = self._build_base_structure()\n result[DataParticleKey.STREAM_NAME] = self.data_particle_type()\n result[DataParticleKey.VALUES] = self._values\n return result\n\n def generate(self, sorted=False):\n \"\"\"\n Generates a JSON_parsed packet from a sample dictionary of sensor data and\n associates a timestamp with it\n\n @param sorted Returned sorted json dict, useful for testing, but slow,\n so dont do it unless it is important\n @return A JSON_raw string, properly structured with port agent time stamp\n and driver timestamp\n @throws InstrumentDriverException If there is a problem with the inputs\n \"\"\"\n json_result = json.dumps(self.generate_dict(), sort_keys=sorted)\n return json_result\n\n def _build_parsed_values(self):\n \"\"\"\n Build values of a parsed structure. Just the values are built so\n so that a child class can override this class, but call it with\n super() to get the base structure before modification\n\n @return the values tag for this data structure ready to JSONify\n @raises SampleException when parsed values can not be properly returned\n \"\"\"\n raise SampleException('Parsed values block not overridden')\n\n def _build_base_structure(self):\n \"\"\"\n Build the base/header information for an output structure.\n Follow on methods can then modify it by adding or editing values.\n\n @return A fresh copy of a core structure to be exported\n \"\"\"\n result = dict(self.contents)\n if not self.contents[DataParticleKey.PORT_TIMESTAMP]:\n del result[DataParticleKey.PORT_TIMESTAMP]\n if not self.contents[DataParticleKey.INTERNAL_TIMESTAMP]:\n del result[DataParticleKey.INTERNAL_TIMESTAMP]\n return result\n\n def _check_timestamp(self, timestamp):\n \"\"\"\n Check to make sure the timestamp is reasonable\n\n @param timestamp An NTP4 formatted timestamp (64bit)\n @return True if timestamp is okay or None, False otherwise\n \"\"\"\n if timestamp is None:\n return True\n if not isinstance(timestamp, float):\n return False\n if timestamp > ntplib.system_to_ntp_time(time.time() + 86400 * 365):\n return False\n else:\n return True\n\n def _check_preferred_timestamps(self):\n \"\"\"\n Check to make sure the preferred timestamp indicated in the\n particle is actually listed, possibly adjusting to 2nd best\n if not there.\n\n @throws SampleException When there is a problem with the preferred\n timestamp in the sample.\n \"\"\"\n if self.contents[DataParticleKey.PREFERRED_TIMESTAMP] is None:\n raise SampleException(\n 'Missing preferred timestamp, %s, in particle' % self.\n contents[DataParticleKey.PREFERRED_TIMESTAMP])\n return True\n\n def _encode_value(self, name, value, encoding_function, value_range=None):\n \"\"\"\n Encode a value using the encoding function, if it fails store the error in a queue\n\n :param value_range tuple containing min/max numerical values or min/max lengths\n \"\"\"\n encoded_val = None\n try:\n encoded_val = encoding_function(value)\n except ValueError as e:\n log.error('Unable to convert %s to %s.', encoded_val,\n encoding_function)\n self._encoding_errors.append({name: value})\n except Exception 
as e:\n log.error(\n 'Data particle error encoding. Name: %s Value: %s, Encoding: %s'\n , name, value, encoding_function)\n self._encoding_errors.append({name: value})\n if value_range:\n try:\n vmin, vmax = value_range\n except ValueError as e:\n log.exception(\n '_encode_value must have exactly two values for tuple argument value_range'\n )\n raise ValueError(e)\n if encoding_function in [int, float]:\n if vmin and encoded_val < vmin:\n log.error(\n 'Particle value (%s) below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and encoded_val > vmax:\n log.error(\n 'Particle value (%s) exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n elif hasattr(encoded_val, '__len__'):\n try:\n if vmin and len(encoded_val) < vmin:\n log.error(\n 'Particle value (%s) length below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and len(encoded_val) > vmax:\n log.error(\n 'Particle value (%s) length exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n except TypeError:\n log.warning(\n '_encode_value received an encoding function (%s) that claimed to implement len() but does not. Unable to apply range test to %s'\n , encoding_function, name)\n return {DataParticleKey.VALUE_ID: name, DataParticleKey.VALUE:\n encoded_val}\n\n def get_encoding_errors(self):\n \"\"\"\n Return the encoding errors list\n \"\"\"\n return self._encoding_errors\n\n\nclass RawDataParticleKey(BaseEnum):\n PAYLOAD = 'raw'\n LENGTH = 'length'\n TYPE = 'type'\n CHECKSUM = 'checksum'\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass DataParticle(object):\n <docstring token>\n _data_particle_type = None\n\n def __init__(self, raw_data, port_timestamp=None, internal_timestamp=\n None, preferred_timestamp=None, quality_flag=DataParticleValue.OK,\n new_sequence=None):\n \"\"\" Build a particle seeded with appropriate information\n\n @param raw_data The raw data used in the particle\n \"\"\"\n if new_sequence is not None and not isinstance(new_sequence, bool):\n raise TypeError('new_sequence is not a bool')\n self.contents = {DataParticleKey.PKT_FORMAT_ID: DataParticleValue.\n JSON_DATA, DataParticleKey.PKT_VERSION: 1, DataParticleKey.\n PORT_TIMESTAMP: port_timestamp, DataParticleKey.\n INTERNAL_TIMESTAMP: internal_timestamp, DataParticleKey.\n DRIVER_TIMESTAMP: ntplib.system_to_ntp_time(time.time()),\n DataParticleKey.PREFERRED_TIMESTAMP: preferred_timestamp,\n DataParticleKey.QUALITY_FLAG: quality_flag}\n self._encoding_errors = []\n if new_sequence is not None:\n self.contents[DataParticleKey.NEW_SEQUENCE] = new_sequence\n self.raw_data = raw_data\n self._values = None\n\n def __eq__(self, arg):\n \"\"\"\n Quick equality check for testing purposes. If they have the same raw\n data, timestamp, they are the same enough for this particle\n \"\"\"\n allowed_diff = 1e-06\n if self._data_particle_type != arg._data_particle_type:\n log.debug('Data particle type does not match: %s %s', self.\n _data_particle_type, arg._data_particle_type)\n return False\n if self.raw_data != arg.raw_data:\n log.debug('Raw data does not match')\n return False\n t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n if t1 is None or t2 is None:\n tdiff = allowed_diff\n else:\n tdiff = abs(t1 - t2)\n if tdiff > allowed_diff:\n log.debug('Timestamp %s does not match %s', t1, t2)\n return False\n generated1 = json.loads(self.generate())\n generated2 = json.loads(arg.generate())\n missing, differing = self._compare(generated1, generated2,\n ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP, DataParticleKey.\n PREFERRED_TIMESTAMP])\n if missing:\n log.error('Key mismatch between particle dictionaries: %r', missing\n )\n return False\n if differing:\n log.error('Value mismatch between particle dictionaries: %r',\n differing)\n return True\n\n @staticmethod\n def _compare(d1, d2, ignore_keys=None):\n ignore_keys = ignore_keys if ignore_keys else []\n missing = set(d1).symmetric_difference(d2)\n differing = {}\n for k in d1:\n if k in ignore_keys or k in missing:\n continue\n if d1[k] != d2[k]:\n differing[k] = d1[k], d2[k]\n return missing, differing\n\n def set_internal_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the internal timestamp\n @param timestamp: NTP timestamp to set\n @param unit_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)\n\n def set_port_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the port timestamp\n @param timestamp: NTP timestamp to set\n @param unix_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if 
timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n if not self._check_timestamp(timestamp):\n raise InstrumentParameterException('invalid timestamp')\n self.contents[DataParticleKey.PORT_TIMESTAMP] = float(timestamp)\n\n def set_value(self, id, value):\n \"\"\"\n Set a content value, restricted as necessary\n\n @param id The ID of the value to set, should be from DataParticleKey\n @param value The value to set\n @raises ReadOnlyException If the parameter cannot be set\n \"\"\"\n if id == DataParticleKey.INTERNAL_TIMESTAMP and self._check_timestamp(\n value):\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value\n else:\n raise ReadOnlyException(\n 'Parameter %s not able to be set to %s after object creation!'\n % (id, value))\n\n def get_value(self, id):\n \"\"\" Return a stored value from contents\n\n @param id The ID (from DataParticleKey) for the parameter to return\n @raises NotImplementedException If there is an invalid id\n \"\"\"\n if DataParticleKey.has(id):\n return self.contents[id]\n else:\n raise NotImplementedException('Value %s not available in particle!'\n , id)\n\n def get_value_from_values(self, value_id):\n \"\"\" Return a stored value from values list\n\n @param value_id The ID of the parameter to return\n \"\"\"\n if not self._values:\n return None\n values = [i for i in self._values if i[DataParticleKey.VALUE_ID] ==\n value_id]\n if not values:\n return None\n return values[0][DataParticleKey.VALUE]\n\n def data_particle_type(self):\n \"\"\"\n Return the data particle type (aka stream name)\n @raise: NotImplementedException if _data_particle_type is not set\n \"\"\"\n if self._data_particle_type is None:\n raise NotImplementedException('_data_particle_type not initialized'\n )\n return self._data_particle_type\n\n def generate_dict(self):\n \"\"\"\n Generate a simple dictionary of sensor data and timestamps, without\n going to JSON. This is useful for the times when JSON is not needed to\n go across an interface. There are times when particles are used\n internally to a component/process/module/etc.\n @retval A python dictionary with the proper timestamps and data values\n @throws InstrumentDriverException if there is a problem wtih the inputs\n \"\"\"\n if not self._check_preferred_timestamps():\n raise SampleException('Preferred timestamp not in particle!')\n self._encoding_errors = []\n if self._values is None:\n self._values = self._build_parsed_values()\n result = self._build_base_structure()\n result[DataParticleKey.STREAM_NAME] = self.data_particle_type()\n result[DataParticleKey.VALUES] = self._values\n return result\n\n def generate(self, sorted=False):\n \"\"\"\n Generates a JSON_parsed packet from a sample dictionary of sensor data and\n associates a timestamp with it\n\n @param sorted Returned sorted json dict, useful for testing, but slow,\n so dont do it unless it is important\n @return A JSON_raw string, properly structured with port agent time stamp\n and driver timestamp\n @throws InstrumentDriverException If there is a problem with the inputs\n \"\"\"\n json_result = json.dumps(self.generate_dict(), sort_keys=sorted)\n return json_result\n\n def _build_parsed_values(self):\n \"\"\"\n Build values of a parsed structure. 
Just the values are built so\n so that a child class can override this class, but call it with\n super() to get the base structure before modification\n\n @return the values tag for this data structure ready to JSONify\n @raises SampleException when parsed values can not be properly returned\n \"\"\"\n raise SampleException('Parsed values block not overridden')\n\n def _build_base_structure(self):\n \"\"\"\n Build the base/header information for an output structure.\n Follow on methods can then modify it by adding or editing values.\n\n @return A fresh copy of a core structure to be exported\n \"\"\"\n result = dict(self.contents)\n if not self.contents[DataParticleKey.PORT_TIMESTAMP]:\n del result[DataParticleKey.PORT_TIMESTAMP]\n if not self.contents[DataParticleKey.INTERNAL_TIMESTAMP]:\n del result[DataParticleKey.INTERNAL_TIMESTAMP]\n return result\n\n def _check_timestamp(self, timestamp):\n \"\"\"\n Check to make sure the timestamp is reasonable\n\n @param timestamp An NTP4 formatted timestamp (64bit)\n @return True if timestamp is okay or None, False otherwise\n \"\"\"\n if timestamp is None:\n return True\n if not isinstance(timestamp, float):\n return False\n if timestamp > ntplib.system_to_ntp_time(time.time() + 86400 * 365):\n return False\n else:\n return True\n\n def _check_preferred_timestamps(self):\n \"\"\"\n Check to make sure the preferred timestamp indicated in the\n particle is actually listed, possibly adjusting to 2nd best\n if not there.\n\n @throws SampleException When there is a problem with the preferred\n timestamp in the sample.\n \"\"\"\n if self.contents[DataParticleKey.PREFERRED_TIMESTAMP] is None:\n raise SampleException(\n 'Missing preferred timestamp, %s, in particle' % self.\n contents[DataParticleKey.PREFERRED_TIMESTAMP])\n return True\n\n def _encode_value(self, name, value, encoding_function, value_range=None):\n \"\"\"\n Encode a value using the encoding function, if it fails store the error in a queue\n\n :param value_range tuple containing min/max numerical values or min/max lengths\n \"\"\"\n encoded_val = None\n try:\n encoded_val = encoding_function(value)\n except ValueError as e:\n log.error('Unable to convert %s to %s.', encoded_val,\n encoding_function)\n self._encoding_errors.append({name: value})\n except Exception as e:\n log.error(\n 'Data particle error encoding. 
Name: %s Value: %s, Encoding: %s'\n , name, value, encoding_function)\n self._encoding_errors.append({name: value})\n if value_range:\n try:\n vmin, vmax = value_range\n except ValueError as e:\n log.exception(\n '_encode_value must have exactly two values for tuple argument value_range'\n )\n raise ValueError(e)\n if encoding_function in [int, float]:\n if vmin and encoded_val < vmin:\n log.error(\n 'Particle value (%s) below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and encoded_val > vmax:\n log.error(\n 'Particle value (%s) exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n elif hasattr(encoded_val, '__len__'):\n try:\n if vmin and len(encoded_val) < vmin:\n log.error(\n 'Particle value (%s) length below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and len(encoded_val) > vmax:\n log.error(\n 'Particle value (%s) length exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n except TypeError:\n log.warning(\n '_encode_value received an encoding function (%s) that claimed to implement len() but does not. Unable to apply range test to %s'\n , encoding_function, name)\n return {DataParticleKey.VALUE_ID: name, DataParticleKey.VALUE:\n encoded_val}\n\n def get_encoding_errors(self):\n \"\"\"\n Return the encoding errors list\n \"\"\"\n return self._encoding_errors\n\n\nclass RawDataParticleKey(BaseEnum):\n PAYLOAD = 'raw'\n LENGTH = 'length'\n TYPE = 'type'\n CHECKSUM = 'checksum'\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass DataParticle(object):\n <docstring token>\n <assignment token>\n\n def __init__(self, raw_data, port_timestamp=None, internal_timestamp=\n None, preferred_timestamp=None, quality_flag=DataParticleValue.OK,\n new_sequence=None):\n \"\"\" Build a particle seeded with appropriate information\n\n @param raw_data The raw data used in the particle\n \"\"\"\n if new_sequence is not None and not isinstance(new_sequence, bool):\n raise TypeError('new_sequence is not a bool')\n self.contents = {DataParticleKey.PKT_FORMAT_ID: DataParticleValue.\n JSON_DATA, DataParticleKey.PKT_VERSION: 1, DataParticleKey.\n PORT_TIMESTAMP: port_timestamp, DataParticleKey.\n INTERNAL_TIMESTAMP: internal_timestamp, DataParticleKey.\n DRIVER_TIMESTAMP: ntplib.system_to_ntp_time(time.time()),\n DataParticleKey.PREFERRED_TIMESTAMP: preferred_timestamp,\n DataParticleKey.QUALITY_FLAG: quality_flag}\n self._encoding_errors = []\n if new_sequence is not None:\n self.contents[DataParticleKey.NEW_SEQUENCE] = new_sequence\n self.raw_data = raw_data\n self._values = None\n\n def __eq__(self, arg):\n \"\"\"\n Quick equality check for testing purposes. If they have the same raw\n data, timestamp, they are the same enough for this particle\n \"\"\"\n allowed_diff = 1e-06\n if self._data_particle_type != arg._data_particle_type:\n log.debug('Data particle type does not match: %s %s', self.\n _data_particle_type, arg._data_particle_type)\n return False\n if self.raw_data != arg.raw_data:\n log.debug('Raw data does not match')\n return False\n t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n if t1 is None or t2 is None:\n tdiff = allowed_diff\n else:\n tdiff = abs(t1 - t2)\n if tdiff > allowed_diff:\n log.debug('Timestamp %s does not match %s', t1, t2)\n return False\n generated1 = json.loads(self.generate())\n generated2 = json.loads(arg.generate())\n missing, differing = self._compare(generated1, generated2,\n ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP, DataParticleKey.\n PREFERRED_TIMESTAMP])\n if missing:\n log.error('Key mismatch between particle dictionaries: %r', missing\n )\n return False\n if differing:\n log.error('Value mismatch between particle dictionaries: %r',\n differing)\n return True\n\n @staticmethod\n def _compare(d1, d2, ignore_keys=None):\n ignore_keys = ignore_keys if ignore_keys else []\n missing = set(d1).symmetric_difference(d2)\n differing = {}\n for k in d1:\n if k in ignore_keys or k in missing:\n continue\n if d1[k] != d2[k]:\n differing[k] = d1[k], d2[k]\n return missing, differing\n\n def set_internal_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the internal timestamp\n @param timestamp: NTP timestamp to set\n @param unit_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)\n\n def set_port_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the port timestamp\n @param timestamp: NTP timestamp to set\n @param unix_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if 
timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n if not self._check_timestamp(timestamp):\n raise InstrumentParameterException('invalid timestamp')\n self.contents[DataParticleKey.PORT_TIMESTAMP] = float(timestamp)\n\n def set_value(self, id, value):\n \"\"\"\n Set a content value, restricted as necessary\n\n @param id The ID of the value to set, should be from DataParticleKey\n @param value The value to set\n @raises ReadOnlyException If the parameter cannot be set\n \"\"\"\n if id == DataParticleKey.INTERNAL_TIMESTAMP and self._check_timestamp(\n value):\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value\n else:\n raise ReadOnlyException(\n 'Parameter %s not able to be set to %s after object creation!'\n % (id, value))\n\n def get_value(self, id):\n \"\"\" Return a stored value from contents\n\n @param id The ID (from DataParticleKey) for the parameter to return\n @raises NotImplementedException If there is an invalid id\n \"\"\"\n if DataParticleKey.has(id):\n return self.contents[id]\n else:\n raise NotImplementedException('Value %s not available in particle!'\n , id)\n\n def get_value_from_values(self, value_id):\n \"\"\" Return a stored value from values list\n\n @param value_id The ID of the parameter to return\n \"\"\"\n if not self._values:\n return None\n values = [i for i in self._values if i[DataParticleKey.VALUE_ID] ==\n value_id]\n if not values:\n return None\n return values[0][DataParticleKey.VALUE]\n\n def data_particle_type(self):\n \"\"\"\n Return the data particle type (aka stream name)\n @raise: NotImplementedException if _data_particle_type is not set\n \"\"\"\n if self._data_particle_type is None:\n raise NotImplementedException('_data_particle_type not initialized'\n )\n return self._data_particle_type\n\n def generate_dict(self):\n \"\"\"\n Generate a simple dictionary of sensor data and timestamps, without\n going to JSON. This is useful for the times when JSON is not needed to\n go across an interface. There are times when particles are used\n internally to a component/process/module/etc.\n @retval A python dictionary with the proper timestamps and data values\n @throws InstrumentDriverException if there is a problem wtih the inputs\n \"\"\"\n if not self._check_preferred_timestamps():\n raise SampleException('Preferred timestamp not in particle!')\n self._encoding_errors = []\n if self._values is None:\n self._values = self._build_parsed_values()\n result = self._build_base_structure()\n result[DataParticleKey.STREAM_NAME] = self.data_particle_type()\n result[DataParticleKey.VALUES] = self._values\n return result\n\n def generate(self, sorted=False):\n \"\"\"\n Generates a JSON_parsed packet from a sample dictionary of sensor data and\n associates a timestamp with it\n\n @param sorted Returned sorted json dict, useful for testing, but slow,\n so dont do it unless it is important\n @return A JSON_raw string, properly structured with port agent time stamp\n and driver timestamp\n @throws InstrumentDriverException If there is a problem with the inputs\n \"\"\"\n json_result = json.dumps(self.generate_dict(), sort_keys=sorted)\n return json_result\n\n def _build_parsed_values(self):\n \"\"\"\n Build values of a parsed structure. 
Just the values are built so\n so that a child class can override this class, but call it with\n super() to get the base structure before modification\n\n @return the values tag for this data structure ready to JSONify\n @raises SampleException when parsed values can not be properly returned\n \"\"\"\n raise SampleException('Parsed values block not overridden')\n\n def _build_base_structure(self):\n \"\"\"\n Build the base/header information for an output structure.\n Follow on methods can then modify it by adding or editing values.\n\n @return A fresh copy of a core structure to be exported\n \"\"\"\n result = dict(self.contents)\n if not self.contents[DataParticleKey.PORT_TIMESTAMP]:\n del result[DataParticleKey.PORT_TIMESTAMP]\n if not self.contents[DataParticleKey.INTERNAL_TIMESTAMP]:\n del result[DataParticleKey.INTERNAL_TIMESTAMP]\n return result\n\n def _check_timestamp(self, timestamp):\n \"\"\"\n Check to make sure the timestamp is reasonable\n\n @param timestamp An NTP4 formatted timestamp (64bit)\n @return True if timestamp is okay or None, False otherwise\n \"\"\"\n if timestamp is None:\n return True\n if not isinstance(timestamp, float):\n return False\n if timestamp > ntplib.system_to_ntp_time(time.time() + 86400 * 365):\n return False\n else:\n return True\n\n def _check_preferred_timestamps(self):\n \"\"\"\n Check to make sure the preferred timestamp indicated in the\n particle is actually listed, possibly adjusting to 2nd best\n if not there.\n\n @throws SampleException When there is a problem with the preferred\n timestamp in the sample.\n \"\"\"\n if self.contents[DataParticleKey.PREFERRED_TIMESTAMP] is None:\n raise SampleException(\n 'Missing preferred timestamp, %s, in particle' % self.\n contents[DataParticleKey.PREFERRED_TIMESTAMP])\n return True\n\n def _encode_value(self, name, value, encoding_function, value_range=None):\n \"\"\"\n Encode a value using the encoding function, if it fails store the error in a queue\n\n :param value_range tuple containing min/max numerical values or min/max lengths\n \"\"\"\n encoded_val = None\n try:\n encoded_val = encoding_function(value)\n except ValueError as e:\n log.error('Unable to convert %s to %s.', encoded_val,\n encoding_function)\n self._encoding_errors.append({name: value})\n except Exception as e:\n log.error(\n 'Data particle error encoding. 
Name: %s Value: %s, Encoding: %s'\n , name, value, encoding_function)\n self._encoding_errors.append({name: value})\n if value_range:\n try:\n vmin, vmax = value_range\n except ValueError as e:\n log.exception(\n '_encode_value must have exactly two values for tuple argument value_range'\n )\n raise ValueError(e)\n if encoding_function in [int, float]:\n if vmin and encoded_val < vmin:\n log.error(\n 'Particle value (%s) below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and encoded_val > vmax:\n log.error(\n 'Particle value (%s) exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n elif hasattr(encoded_val, '__len__'):\n try:\n if vmin and len(encoded_val) < vmin:\n log.error(\n 'Particle value (%s) length below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and len(encoded_val) > vmax:\n log.error(\n 'Particle value (%s) length exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n except TypeError:\n log.warning(\n '_encode_value received an encoding function (%s) that claimed to implement len() but does not. Unable to apply range test to %s'\n , encoding_function, name)\n return {DataParticleKey.VALUE_ID: name, DataParticleKey.VALUE:\n encoded_val}\n\n def get_encoding_errors(self):\n \"\"\"\n Return the encoding errors list\n \"\"\"\n return self._encoding_errors\n\n\nclass RawDataParticleKey(BaseEnum):\n PAYLOAD = 'raw'\n LENGTH = 'length'\n TYPE = 'type'\n CHECKSUM = 'checksum'\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass DataParticle(object):\n <docstring token>\n <assignment token>\n\n def __init__(self, raw_data, port_timestamp=None, internal_timestamp=\n None, preferred_timestamp=None, quality_flag=DataParticleValue.OK,\n new_sequence=None):\n \"\"\" Build a particle seeded with appropriate information\n\n @param raw_data The raw data used in the particle\n \"\"\"\n if new_sequence is not None and not isinstance(new_sequence, bool):\n raise TypeError('new_sequence is not a bool')\n self.contents = {DataParticleKey.PKT_FORMAT_ID: DataParticleValue.\n JSON_DATA, DataParticleKey.PKT_VERSION: 1, DataParticleKey.\n PORT_TIMESTAMP: port_timestamp, DataParticleKey.\n INTERNAL_TIMESTAMP: internal_timestamp, DataParticleKey.\n DRIVER_TIMESTAMP: ntplib.system_to_ntp_time(time.time()),\n DataParticleKey.PREFERRED_TIMESTAMP: preferred_timestamp,\n DataParticleKey.QUALITY_FLAG: quality_flag}\n self._encoding_errors = []\n if new_sequence is not None:\n self.contents[DataParticleKey.NEW_SEQUENCE] = new_sequence\n self.raw_data = raw_data\n self._values = None\n\n def __eq__(self, arg):\n \"\"\"\n Quick equality check for testing purposes. If they have the same raw\n data, timestamp, they are the same enough for this particle\n \"\"\"\n allowed_diff = 1e-06\n if self._data_particle_type != arg._data_particle_type:\n log.debug('Data particle type does not match: %s %s', self.\n _data_particle_type, arg._data_particle_type)\n return False\n if self.raw_data != arg.raw_data:\n log.debug('Raw data does not match')\n return False\n t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n if t1 is None or t2 is None:\n tdiff = allowed_diff\n else:\n tdiff = abs(t1 - t2)\n if tdiff > allowed_diff:\n log.debug('Timestamp %s does not match %s', t1, t2)\n return False\n generated1 = json.loads(self.generate())\n generated2 = json.loads(arg.generate())\n missing, differing = self._compare(generated1, generated2,\n ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP, DataParticleKey.\n PREFERRED_TIMESTAMP])\n if missing:\n log.error('Key mismatch between particle dictionaries: %r', missing\n )\n return False\n if differing:\n log.error('Value mismatch between particle dictionaries: %r',\n differing)\n return True\n\n @staticmethod\n def _compare(d1, d2, ignore_keys=None):\n ignore_keys = ignore_keys if ignore_keys else []\n missing = set(d1).symmetric_difference(d2)\n differing = {}\n for k in d1:\n if k in ignore_keys or k in missing:\n continue\n if d1[k] != d2[k]:\n differing[k] = d1[k], d2[k]\n return missing, differing\n\n def set_internal_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the internal timestamp\n @param timestamp: NTP timestamp to set\n @param unit_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)\n\n def set_port_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the port timestamp\n @param timestamp: NTP timestamp to set\n @param unix_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if 
timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n if not self._check_timestamp(timestamp):\n raise InstrumentParameterException('invalid timestamp')\n self.contents[DataParticleKey.PORT_TIMESTAMP] = float(timestamp)\n\n def set_value(self, id, value):\n \"\"\"\n Set a content value, restricted as necessary\n\n @param id The ID of the value to set, should be from DataParticleKey\n @param value The value to set\n @raises ReadOnlyException If the parameter cannot be set\n \"\"\"\n if id == DataParticleKey.INTERNAL_TIMESTAMP and self._check_timestamp(\n value):\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value\n else:\n raise ReadOnlyException(\n 'Parameter %s not able to be set to %s after object creation!'\n % (id, value))\n\n def get_value(self, id):\n \"\"\" Return a stored value from contents\n\n @param id The ID (from DataParticleKey) for the parameter to return\n @raises NotImplementedException If there is an invalid id\n \"\"\"\n if DataParticleKey.has(id):\n return self.contents[id]\n else:\n raise NotImplementedException('Value %s not available in particle!'\n , id)\n\n def get_value_from_values(self, value_id):\n \"\"\" Return a stored value from values list\n\n @param value_id The ID of the parameter to return\n \"\"\"\n if not self._values:\n return None\n values = [i for i in self._values if i[DataParticleKey.VALUE_ID] ==\n value_id]\n if not values:\n return None\n return values[0][DataParticleKey.VALUE]\n\n def data_particle_type(self):\n \"\"\"\n Return the data particle type (aka stream name)\n @raise: NotImplementedException if _data_particle_type is not set\n \"\"\"\n if self._data_particle_type is None:\n raise NotImplementedException('_data_particle_type not initialized'\n )\n return self._data_particle_type\n\n def generate_dict(self):\n \"\"\"\n Generate a simple dictionary of sensor data and timestamps, without\n going to JSON. This is useful for the times when JSON is not needed to\n go across an interface. There are times when particles are used\n internally to a component/process/module/etc.\n @retval A python dictionary with the proper timestamps and data values\n @throws InstrumentDriverException if there is a problem wtih the inputs\n \"\"\"\n if not self._check_preferred_timestamps():\n raise SampleException('Preferred timestamp not in particle!')\n self._encoding_errors = []\n if self._values is None:\n self._values = self._build_parsed_values()\n result = self._build_base_structure()\n result[DataParticleKey.STREAM_NAME] = self.data_particle_type()\n result[DataParticleKey.VALUES] = self._values\n return result\n\n def generate(self, sorted=False):\n \"\"\"\n Generates a JSON_parsed packet from a sample dictionary of sensor data and\n associates a timestamp with it\n\n @param sorted Returned sorted json dict, useful for testing, but slow,\n so dont do it unless it is important\n @return A JSON_raw string, properly structured with port agent time stamp\n and driver timestamp\n @throws InstrumentDriverException If there is a problem with the inputs\n \"\"\"\n json_result = json.dumps(self.generate_dict(), sort_keys=sorted)\n return json_result\n\n def _build_parsed_values(self):\n \"\"\"\n Build values of a parsed structure. 
Just the values are built so\n so that a child class can override this class, but call it with\n super() to get the base structure before modification\n\n @return the values tag for this data structure ready to JSONify\n @raises SampleException when parsed values can not be properly returned\n \"\"\"\n raise SampleException('Parsed values block not overridden')\n <function token>\n\n def _check_timestamp(self, timestamp):\n \"\"\"\n Check to make sure the timestamp is reasonable\n\n @param timestamp An NTP4 formatted timestamp (64bit)\n @return True if timestamp is okay or None, False otherwise\n \"\"\"\n if timestamp is None:\n return True\n if not isinstance(timestamp, float):\n return False\n if timestamp > ntplib.system_to_ntp_time(time.time() + 86400 * 365):\n return False\n else:\n return True\n\n def _check_preferred_timestamps(self):\n \"\"\"\n Check to make sure the preferred timestamp indicated in the\n particle is actually listed, possibly adjusting to 2nd best\n if not there.\n\n @throws SampleException When there is a problem with the preferred\n timestamp in the sample.\n \"\"\"\n if self.contents[DataParticleKey.PREFERRED_TIMESTAMP] is None:\n raise SampleException(\n 'Missing preferred timestamp, %s, in particle' % self.\n contents[DataParticleKey.PREFERRED_TIMESTAMP])\n return True\n\n def _encode_value(self, name, value, encoding_function, value_range=None):\n \"\"\"\n Encode a value using the encoding function, if it fails store the error in a queue\n\n :param value_range tuple containing min/max numerical values or min/max lengths\n \"\"\"\n encoded_val = None\n try:\n encoded_val = encoding_function(value)\n except ValueError as e:\n log.error('Unable to convert %s to %s.', encoded_val,\n encoding_function)\n self._encoding_errors.append({name: value})\n except Exception as e:\n log.error(\n 'Data particle error encoding. Name: %s Value: %s, Encoding: %s'\n , name, value, encoding_function)\n self._encoding_errors.append({name: value})\n if value_range:\n try:\n vmin, vmax = value_range\n except ValueError as e:\n log.exception(\n '_encode_value must have exactly two values for tuple argument value_range'\n )\n raise ValueError(e)\n if encoding_function in [int, float]:\n if vmin and encoded_val < vmin:\n log.error(\n 'Particle value (%s) below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and encoded_val > vmax:\n log.error(\n 'Particle value (%s) exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n elif hasattr(encoded_val, '__len__'):\n try:\n if vmin and len(encoded_val) < vmin:\n log.error(\n 'Particle value (%s) length below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and len(encoded_val) > vmax:\n log.error(\n 'Particle value (%s) length exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n except TypeError:\n log.warning(\n '_encode_value received an encoding function (%s) that claimed to implement len() but does not. 
Unable to apply range test to %s'\n , encoding_function, name)\n return {DataParticleKey.VALUE_ID: name, DataParticleKey.VALUE:\n encoded_val}\n\n def get_encoding_errors(self):\n \"\"\"\n Return the encoding errors list\n \"\"\"\n return self._encoding_errors\n\n\nclass RawDataParticleKey(BaseEnum):\n PAYLOAD = 'raw'\n LENGTH = 'length'\n TYPE = 'type'\n CHECKSUM = 'checksum'\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass DataParticle(object):\n <docstring token>\n <assignment token>\n\n def __init__(self, raw_data, port_timestamp=None, internal_timestamp=\n None, preferred_timestamp=None, quality_flag=DataParticleValue.OK,\n new_sequence=None):\n \"\"\" Build a particle seeded with appropriate information\n\n @param raw_data The raw data used in the particle\n \"\"\"\n if new_sequence is not None and not isinstance(new_sequence, bool):\n raise TypeError('new_sequence is not a bool')\n self.contents = {DataParticleKey.PKT_FORMAT_ID: DataParticleValue.\n JSON_DATA, DataParticleKey.PKT_VERSION: 1, DataParticleKey.\n PORT_TIMESTAMP: port_timestamp, DataParticleKey.\n INTERNAL_TIMESTAMP: internal_timestamp, DataParticleKey.\n DRIVER_TIMESTAMP: ntplib.system_to_ntp_time(time.time()),\n DataParticleKey.PREFERRED_TIMESTAMP: preferred_timestamp,\n DataParticleKey.QUALITY_FLAG: quality_flag}\n self._encoding_errors = []\n if new_sequence is not None:\n self.contents[DataParticleKey.NEW_SEQUENCE] = new_sequence\n self.raw_data = raw_data\n self._values = None\n\n def __eq__(self, arg):\n \"\"\"\n Quick equality check for testing purposes. If they have the same raw\n data, timestamp, they are the same enough for this particle\n \"\"\"\n allowed_diff = 1e-06\n if self._data_particle_type != arg._data_particle_type:\n log.debug('Data particle type does not match: %s %s', self.\n _data_particle_type, arg._data_particle_type)\n return False\n if self.raw_data != arg.raw_data:\n log.debug('Raw data does not match')\n return False\n t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n if t1 is None or t2 is None:\n tdiff = allowed_diff\n else:\n tdiff = abs(t1 - t2)\n if tdiff > allowed_diff:\n log.debug('Timestamp %s does not match %s', t1, t2)\n return False\n generated1 = json.loads(self.generate())\n generated2 = json.loads(arg.generate())\n missing, differing = self._compare(generated1, generated2,\n ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP, DataParticleKey.\n PREFERRED_TIMESTAMP])\n if missing:\n log.error('Key mismatch between particle dictionaries: %r', missing\n )\n return False\n if differing:\n log.error('Value mismatch between particle dictionaries: %r',\n differing)\n return True\n\n @staticmethod\n def _compare(d1, d2, ignore_keys=None):\n ignore_keys = ignore_keys if ignore_keys else []\n missing = set(d1).symmetric_difference(d2)\n differing = {}\n for k in d1:\n if k in ignore_keys or k in missing:\n continue\n if d1[k] != d2[k]:\n differing[k] = d1[k], d2[k]\n return missing, differing\n\n def set_internal_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the internal timestamp\n @param timestamp: NTP timestamp to set\n @param unit_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)\n\n def set_port_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the port timestamp\n @param timestamp: NTP timestamp to set\n @param unix_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if 
timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n if not self._check_timestamp(timestamp):\n raise InstrumentParameterException('invalid timestamp')\n self.contents[DataParticleKey.PORT_TIMESTAMP] = float(timestamp)\n\n def set_value(self, id, value):\n \"\"\"\n Set a content value, restricted as necessary\n\n @param id The ID of the value to set, should be from DataParticleKey\n @param value The value to set\n @raises ReadOnlyException If the parameter cannot be set\n \"\"\"\n if id == DataParticleKey.INTERNAL_TIMESTAMP and self._check_timestamp(\n value):\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value\n else:\n raise ReadOnlyException(\n 'Parameter %s not able to be set to %s after object creation!'\n % (id, value))\n\n def get_value(self, id):\n \"\"\" Return a stored value from contents\n\n @param id The ID (from DataParticleKey) for the parameter to return\n @raises NotImplementedException If there is an invalid id\n \"\"\"\n if DataParticleKey.has(id):\n return self.contents[id]\n else:\n raise NotImplementedException('Value %s not available in particle!'\n , id)\n\n def get_value_from_values(self, value_id):\n \"\"\" Return a stored value from values list\n\n @param value_id The ID of the parameter to return\n \"\"\"\n if not self._values:\n return None\n values = [i for i in self._values if i[DataParticleKey.VALUE_ID] ==\n value_id]\n if not values:\n return None\n return values[0][DataParticleKey.VALUE]\n\n def data_particle_type(self):\n \"\"\"\n Return the data particle type (aka stream name)\n @raise: NotImplementedException if _data_particle_type is not set\n \"\"\"\n if self._data_particle_type is None:\n raise NotImplementedException('_data_particle_type not initialized'\n )\n return self._data_particle_type\n\n def generate_dict(self):\n \"\"\"\n Generate a simple dictionary of sensor data and timestamps, without\n going to JSON. This is useful for the times when JSON is not needed to\n go across an interface. 
There are times when particles are used\n internally to a component/process/module/etc.\n @retval A python dictionary with the proper timestamps and data values\n @throws InstrumentDriverException if there is a problem wtih the inputs\n \"\"\"\n if not self._check_preferred_timestamps():\n raise SampleException('Preferred timestamp not in particle!')\n self._encoding_errors = []\n if self._values is None:\n self._values = self._build_parsed_values()\n result = self._build_base_structure()\n result[DataParticleKey.STREAM_NAME] = self.data_particle_type()\n result[DataParticleKey.VALUES] = self._values\n return result\n\n def generate(self, sorted=False):\n \"\"\"\n Generates a JSON_parsed packet from a sample dictionary of sensor data and\n associates a timestamp with it\n\n @param sorted Returned sorted json dict, useful for testing, but slow,\n so dont do it unless it is important\n @return A JSON_raw string, properly structured with port agent time stamp\n and driver timestamp\n @throws InstrumentDriverException If there is a problem with the inputs\n \"\"\"\n json_result = json.dumps(self.generate_dict(), sort_keys=sorted)\n return json_result\n <function token>\n <function token>\n\n def _check_timestamp(self, timestamp):\n \"\"\"\n Check to make sure the timestamp is reasonable\n\n @param timestamp An NTP4 formatted timestamp (64bit)\n @return True if timestamp is okay or None, False otherwise\n \"\"\"\n if timestamp is None:\n return True\n if not isinstance(timestamp, float):\n return False\n if timestamp > ntplib.system_to_ntp_time(time.time() + 86400 * 365):\n return False\n else:\n return True\n\n def _check_preferred_timestamps(self):\n \"\"\"\n Check to make sure the preferred timestamp indicated in the\n particle is actually listed, possibly adjusting to 2nd best\n if not there.\n\n @throws SampleException When there is a problem with the preferred\n timestamp in the sample.\n \"\"\"\n if self.contents[DataParticleKey.PREFERRED_TIMESTAMP] is None:\n raise SampleException(\n 'Missing preferred timestamp, %s, in particle' % self.\n contents[DataParticleKey.PREFERRED_TIMESTAMP])\n return True\n\n def _encode_value(self, name, value, encoding_function, value_range=None):\n \"\"\"\n Encode a value using the encoding function, if it fails store the error in a queue\n\n :param value_range tuple containing min/max numerical values or min/max lengths\n \"\"\"\n encoded_val = None\n try:\n encoded_val = encoding_function(value)\n except ValueError as e:\n log.error('Unable to convert %s to %s.', encoded_val,\n encoding_function)\n self._encoding_errors.append({name: value})\n except Exception as e:\n log.error(\n 'Data particle error encoding. 
Name: %s Value: %s, Encoding: %s'\n , name, value, encoding_function)\n self._encoding_errors.append({name: value})\n if value_range:\n try:\n vmin, vmax = value_range\n except ValueError as e:\n log.exception(\n '_encode_value must have exactly two values for tuple argument value_range'\n )\n raise ValueError(e)\n if encoding_function in [int, float]:\n if vmin and encoded_val < vmin:\n log.error(\n 'Particle value (%s) below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and encoded_val > vmax:\n log.error(\n 'Particle value (%s) exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n elif hasattr(encoded_val, '__len__'):\n try:\n if vmin and len(encoded_val) < vmin:\n log.error(\n 'Particle value (%s) length below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and len(encoded_val) > vmax:\n log.error(\n 'Particle value (%s) length exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n except TypeError:\n log.warning(\n '_encode_value received an encoding function (%s) that claimed to implement len() but does not. Unable to apply range test to %s'\n , encoding_function, name)\n return {DataParticleKey.VALUE_ID: name, DataParticleKey.VALUE:\n encoded_val}\n\n def get_encoding_errors(self):\n \"\"\"\n Return the encoding errors list\n \"\"\"\n return self._encoding_errors\n\n\nclass RawDataParticleKey(BaseEnum):\n PAYLOAD = 'raw'\n LENGTH = 'length'\n TYPE = 'type'\n CHECKSUM = 'checksum'\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass DataParticle(object):\n <docstring token>\n <assignment token>\n\n def __init__(self, raw_data, port_timestamp=None, internal_timestamp=\n None, preferred_timestamp=None, quality_flag=DataParticleValue.OK,\n new_sequence=None):\n \"\"\" Build a particle seeded with appropriate information\n\n @param raw_data The raw data used in the particle\n \"\"\"\n if new_sequence is not None and not isinstance(new_sequence, bool):\n raise TypeError('new_sequence is not a bool')\n self.contents = {DataParticleKey.PKT_FORMAT_ID: DataParticleValue.\n JSON_DATA, DataParticleKey.PKT_VERSION: 1, DataParticleKey.\n PORT_TIMESTAMP: port_timestamp, DataParticleKey.\n INTERNAL_TIMESTAMP: internal_timestamp, DataParticleKey.\n DRIVER_TIMESTAMP: ntplib.system_to_ntp_time(time.time()),\n DataParticleKey.PREFERRED_TIMESTAMP: preferred_timestamp,\n DataParticleKey.QUALITY_FLAG: quality_flag}\n self._encoding_errors = []\n if new_sequence is not None:\n self.contents[DataParticleKey.NEW_SEQUENCE] = new_sequence\n self.raw_data = raw_data\n self._values = None\n\n def __eq__(self, arg):\n \"\"\"\n Quick equality check for testing purposes. If they have the same raw\n data, timestamp, they are the same enough for this particle\n \"\"\"\n allowed_diff = 1e-06\n if self._data_particle_type != arg._data_particle_type:\n log.debug('Data particle type does not match: %s %s', self.\n _data_particle_type, arg._data_particle_type)\n return False\n if self.raw_data != arg.raw_data:\n log.debug('Raw data does not match')\n return False\n t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n if t1 is None or t2 is None:\n tdiff = allowed_diff\n else:\n tdiff = abs(t1 - t2)\n if tdiff > allowed_diff:\n log.debug('Timestamp %s does not match %s', t1, t2)\n return False\n generated1 = json.loads(self.generate())\n generated2 = json.loads(arg.generate())\n missing, differing = self._compare(generated1, generated2,\n ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP, DataParticleKey.\n PREFERRED_TIMESTAMP])\n if missing:\n log.error('Key mismatch between particle dictionaries: %r', missing\n )\n return False\n if differing:\n log.error('Value mismatch between particle dictionaries: %r',\n differing)\n return True\n\n @staticmethod\n def _compare(d1, d2, ignore_keys=None):\n ignore_keys = ignore_keys if ignore_keys else []\n missing = set(d1).symmetric_difference(d2)\n differing = {}\n for k in d1:\n if k in ignore_keys or k in missing:\n continue\n if d1[k] != d2[k]:\n differing[k] = d1[k], d2[k]\n return missing, differing\n\n def set_internal_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the internal timestamp\n @param timestamp: NTP timestamp to set\n @param unit_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)\n\n def set_port_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the port timestamp\n @param timestamp: NTP timestamp to set\n @param unix_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if 
timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n if not self._check_timestamp(timestamp):\n raise InstrumentParameterException('invalid timestamp')\n self.contents[DataParticleKey.PORT_TIMESTAMP] = float(timestamp)\n\n def set_value(self, id, value):\n \"\"\"\n Set a content value, restricted as necessary\n\n @param id The ID of the value to set, should be from DataParticleKey\n @param value The value to set\n @raises ReadOnlyException If the parameter cannot be set\n \"\"\"\n if id == DataParticleKey.INTERNAL_TIMESTAMP and self._check_timestamp(\n value):\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value\n else:\n raise ReadOnlyException(\n 'Parameter %s not able to be set to %s after object creation!'\n % (id, value))\n\n def get_value(self, id):\n \"\"\" Return a stored value from contents\n\n @param id The ID (from DataParticleKey) for the parameter to return\n @raises NotImplementedException If there is an invalid id\n \"\"\"\n if DataParticleKey.has(id):\n return self.contents[id]\n else:\n raise NotImplementedException('Value %s not available in particle!'\n , id)\n\n def get_value_from_values(self, value_id):\n \"\"\" Return a stored value from values list\n\n @param value_id The ID of the parameter to return\n \"\"\"\n if not self._values:\n return None\n values = [i for i in self._values if i[DataParticleKey.VALUE_ID] ==\n value_id]\n if not values:\n return None\n return values[0][DataParticleKey.VALUE]\n\n def data_particle_type(self):\n \"\"\"\n Return the data particle type (aka stream name)\n @raise: NotImplementedException if _data_particle_type is not set\n \"\"\"\n if self._data_particle_type is None:\n raise NotImplementedException('_data_particle_type not initialized'\n )\n return self._data_particle_type\n\n def generate_dict(self):\n \"\"\"\n Generate a simple dictionary of sensor data and timestamps, without\n going to JSON. This is useful for the times when JSON is not needed to\n go across an interface. 
There are times when particles are used\n internally to a component/process/module/etc.\n @retval A python dictionary with the proper timestamps and data values\n @throws InstrumentDriverException if there is a problem wtih the inputs\n \"\"\"\n if not self._check_preferred_timestamps():\n raise SampleException('Preferred timestamp not in particle!')\n self._encoding_errors = []\n if self._values is None:\n self._values = self._build_parsed_values()\n result = self._build_base_structure()\n result[DataParticleKey.STREAM_NAME] = self.data_particle_type()\n result[DataParticleKey.VALUES] = self._values\n return result\n\n def generate(self, sorted=False):\n \"\"\"\n Generates a JSON_parsed packet from a sample dictionary of sensor data and\n associates a timestamp with it\n\n @param sorted Returned sorted json dict, useful for testing, but slow,\n so dont do it unless it is important\n @return A JSON_raw string, properly structured with port agent time stamp\n and driver timestamp\n @throws InstrumentDriverException If there is a problem with the inputs\n \"\"\"\n json_result = json.dumps(self.generate_dict(), sort_keys=sorted)\n return json_result\n <function token>\n <function token>\n\n def _check_timestamp(self, timestamp):\n \"\"\"\n Check to make sure the timestamp is reasonable\n\n @param timestamp An NTP4 formatted timestamp (64bit)\n @return True if timestamp is okay or None, False otherwise\n \"\"\"\n if timestamp is None:\n return True\n if not isinstance(timestamp, float):\n return False\n if timestamp > ntplib.system_to_ntp_time(time.time() + 86400 * 365):\n return False\n else:\n return True\n <function token>\n\n def _encode_value(self, name, value, encoding_function, value_range=None):\n \"\"\"\n Encode a value using the encoding function, if it fails store the error in a queue\n\n :param value_range tuple containing min/max numerical values or min/max lengths\n \"\"\"\n encoded_val = None\n try:\n encoded_val = encoding_function(value)\n except ValueError as e:\n log.error('Unable to convert %s to %s.', encoded_val,\n encoding_function)\n self._encoding_errors.append({name: value})\n except Exception as e:\n log.error(\n 'Data particle error encoding. Name: %s Value: %s, Encoding: %s'\n , name, value, encoding_function)\n self._encoding_errors.append({name: value})\n if value_range:\n try:\n vmin, vmax = value_range\n except ValueError as e:\n log.exception(\n '_encode_value must have exactly two values for tuple argument value_range'\n )\n raise ValueError(e)\n if encoding_function in [int, float]:\n if vmin and encoded_val < vmin:\n log.error(\n 'Particle value (%s) below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and encoded_val > vmax:\n log.error(\n 'Particle value (%s) exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n elif hasattr(encoded_val, '__len__'):\n try:\n if vmin and len(encoded_val) < vmin:\n log.error(\n 'Particle value (%s) length below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and len(encoded_val) > vmax:\n log.error(\n 'Particle value (%s) length exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n except TypeError:\n log.warning(\n '_encode_value received an encoding function (%s) that claimed to implement len() but does not. 
Unable to apply range test to %s'\n , encoding_function, name)\n return {DataParticleKey.VALUE_ID: name, DataParticleKey.VALUE:\n encoded_val}\n\n def get_encoding_errors(self):\n \"\"\"\n Return the encoding errors list\n \"\"\"\n return self._encoding_errors\n\n\nclass RawDataParticleKey(BaseEnum):\n PAYLOAD = 'raw'\n LENGTH = 'length'\n TYPE = 'type'\n CHECKSUM = 'checksum'\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
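The row above ends with RawDataParticle._build_parsed_values, which turns a port agent packet dictionary into a JSON-ready list of value entries. Below is a minimal, self-contained sketch of that transformation; build_raw_values, the plain-string keys, and the sample packet are placeholders standing in for the DataParticleKey/RawDataParticleKey constants and real packet data.

```python
# Illustrative sketch only: mirrors RawDataParticle._build_parsed_values without the
# mi-instrument dependencies. build_raw_values, the plain-string keys, and the sample
# packet below are placeholders, not part of the original module.
import base64
import json

def build_raw_values(port_agent_packet):
    if not isinstance(port_agent_packet, dict):
        raise ValueError('raw data not a dictionary')
    for param in ('raw', 'length', 'type', 'checksum'):
        if param not in port_agent_packet:
            raise ValueError('raw data not a complete port agent packet. missing %s' % param)
    # The binary payload is base64-encoded so the resulting structure stays JSON-serializable.
    payload = base64.b64encode(port_agent_packet['raw']).decode('ascii')
    return [
        {'value_id': 'raw', 'value': payload, 'binary': True},
        {'value_id': 'length', 'value': int(port_agent_packet['length'])},
        {'value_id': 'type', 'value': int(port_agent_packet['type'])},
        {'value_id': 'checksum', 'value': int(port_agent_packet['checksum'])},
    ]

packet = {'raw': b'\x01\x02\x03', 'length': 3, 'type': 1, 'checksum': 6}
print(json.dumps(build_raw_values(packet), indent=2))
```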
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass DataParticle(object):\n <docstring token>\n <assignment token>\n\n def __init__(self, raw_data, port_timestamp=None, internal_timestamp=\n None, preferred_timestamp=None, quality_flag=DataParticleValue.OK,\n new_sequence=None):\n \"\"\" Build a particle seeded with appropriate information\n\n @param raw_data The raw data used in the particle\n \"\"\"\n if new_sequence is not None and not isinstance(new_sequence, bool):\n raise TypeError('new_sequence is not a bool')\n self.contents = {DataParticleKey.PKT_FORMAT_ID: DataParticleValue.\n JSON_DATA, DataParticleKey.PKT_VERSION: 1, DataParticleKey.\n PORT_TIMESTAMP: port_timestamp, DataParticleKey.\n INTERNAL_TIMESTAMP: internal_timestamp, DataParticleKey.\n DRIVER_TIMESTAMP: ntplib.system_to_ntp_time(time.time()),\n DataParticleKey.PREFERRED_TIMESTAMP: preferred_timestamp,\n DataParticleKey.QUALITY_FLAG: quality_flag}\n self._encoding_errors = []\n if new_sequence is not None:\n self.contents[DataParticleKey.NEW_SEQUENCE] = new_sequence\n self.raw_data = raw_data\n self._values = None\n\n def __eq__(self, arg):\n \"\"\"\n Quick equality check for testing purposes. If they have the same raw\n data, timestamp, they are the same enough for this particle\n \"\"\"\n allowed_diff = 1e-06\n if self._data_particle_type != arg._data_particle_type:\n log.debug('Data particle type does not match: %s %s', self.\n _data_particle_type, arg._data_particle_type)\n return False\n if self.raw_data != arg.raw_data:\n log.debug('Raw data does not match')\n return False\n t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n if t1 is None or t2 is None:\n tdiff = allowed_diff\n else:\n tdiff = abs(t1 - t2)\n if tdiff > allowed_diff:\n log.debug('Timestamp %s does not match %s', t1, t2)\n return False\n generated1 = json.loads(self.generate())\n generated2 = json.loads(arg.generate())\n missing, differing = self._compare(generated1, generated2,\n ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP, DataParticleKey.\n PREFERRED_TIMESTAMP])\n if missing:\n log.error('Key mismatch between particle dictionaries: %r', missing\n )\n return False\n if differing:\n log.error('Value mismatch between particle dictionaries: %r',\n differing)\n return True\n\n @staticmethod\n def _compare(d1, d2, ignore_keys=None):\n ignore_keys = ignore_keys if ignore_keys else []\n missing = set(d1).symmetric_difference(d2)\n differing = {}\n for k in d1:\n if k in ignore_keys or k in missing:\n continue\n if d1[k] != d2[k]:\n differing[k] = d1[k], d2[k]\n return missing, differing\n\n def set_internal_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the internal timestamp\n @param timestamp: NTP timestamp to set\n @param unit_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)\n <function token>\n\n def set_value(self, id, value):\n \"\"\"\n Set a content value, restricted as necessary\n\n @param id The ID of the value to set, should be from DataParticleKey\n @param value The value to set\n @raises ReadOnlyException If the parameter cannot be set\n \"\"\"\n if id == 
DataParticleKey.INTERNAL_TIMESTAMP and self._check_timestamp(\n value):\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value\n else:\n raise ReadOnlyException(\n 'Parameter %s not able to be set to %s after object creation!'\n % (id, value))\n\n def get_value(self, id):\n \"\"\" Return a stored value from contents\n\n @param id The ID (from DataParticleKey) for the parameter to return\n @raises NotImplementedException If there is an invalid id\n \"\"\"\n if DataParticleKey.has(id):\n return self.contents[id]\n else:\n raise NotImplementedException('Value %s not available in particle!'\n , id)\n\n def get_value_from_values(self, value_id):\n \"\"\" Return a stored value from values list\n\n @param value_id The ID of the parameter to return\n \"\"\"\n if not self._values:\n return None\n values = [i for i in self._values if i[DataParticleKey.VALUE_ID] ==\n value_id]\n if not values:\n return None\n return values[0][DataParticleKey.VALUE]\n\n def data_particle_type(self):\n \"\"\"\n Return the data particle type (aka stream name)\n @raise: NotImplementedException if _data_particle_type is not set\n \"\"\"\n if self._data_particle_type is None:\n raise NotImplementedException('_data_particle_type not initialized'\n )\n return self._data_particle_type\n\n def generate_dict(self):\n \"\"\"\n Generate a simple dictionary of sensor data and timestamps, without\n going to JSON. This is useful for the times when JSON is not needed to\n go across an interface. There are times when particles are used\n internally to a component/process/module/etc.\n @retval A python dictionary with the proper timestamps and data values\n @throws InstrumentDriverException if there is a problem wtih the inputs\n \"\"\"\n if not self._check_preferred_timestamps():\n raise SampleException('Preferred timestamp not in particle!')\n self._encoding_errors = []\n if self._values is None:\n self._values = self._build_parsed_values()\n result = self._build_base_structure()\n result[DataParticleKey.STREAM_NAME] = self.data_particle_type()\n result[DataParticleKey.VALUES] = self._values\n return result\n\n def generate(self, sorted=False):\n \"\"\"\n Generates a JSON_parsed packet from a sample dictionary of sensor data and\n associates a timestamp with it\n\n @param sorted Returned sorted json dict, useful for testing, but slow,\n so dont do it unless it is important\n @return A JSON_raw string, properly structured with port agent time stamp\n and driver timestamp\n @throws InstrumentDriverException If there is a problem with the inputs\n \"\"\"\n json_result = json.dumps(self.generate_dict(), sort_keys=sorted)\n return json_result\n <function token>\n <function token>\n\n def _check_timestamp(self, timestamp):\n \"\"\"\n Check to make sure the timestamp is reasonable\n\n @param timestamp An NTP4 formatted timestamp (64bit)\n @return True if timestamp is okay or None, False otherwise\n \"\"\"\n if timestamp is None:\n return True\n if not isinstance(timestamp, float):\n return False\n if timestamp > ntplib.system_to_ntp_time(time.time() + 86400 * 365):\n return False\n else:\n return True\n <function token>\n\n def _encode_value(self, name, value, encoding_function, value_range=None):\n \"\"\"\n Encode a value using the encoding function, if it fails store the error in a queue\n\n :param value_range tuple containing min/max numerical values or min/max lengths\n \"\"\"\n encoded_val = None\n try:\n encoded_val = encoding_function(value)\n except ValueError as e:\n log.error('Unable to convert %s to %s.', encoded_val,\n 
encoding_function)\n self._encoding_errors.append({name: value})\n except Exception as e:\n log.error(\n 'Data particle error encoding. Name: %s Value: %s, Encoding: %s'\n , name, value, encoding_function)\n self._encoding_errors.append({name: value})\n if value_range:\n try:\n vmin, vmax = value_range\n except ValueError as e:\n log.exception(\n '_encode_value must have exactly two values for tuple argument value_range'\n )\n raise ValueError(e)\n if encoding_function in [int, float]:\n if vmin and encoded_val < vmin:\n log.error(\n 'Particle value (%s) below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and encoded_val > vmax:\n log.error(\n 'Particle value (%s) exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n elif hasattr(encoded_val, '__len__'):\n try:\n if vmin and len(encoded_val) < vmin:\n log.error(\n 'Particle value (%s) length below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and len(encoded_val) > vmax:\n log.error(\n 'Particle value (%s) length exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n except TypeError:\n log.warning(\n '_encode_value received an encoding function (%s) that claimed to implement len() but does not. Unable to apply range test to %s'\n , encoding_function, name)\n return {DataParticleKey.VALUE_ID: name, DataParticleKey.VALUE:\n encoded_val}\n\n def get_encoding_errors(self):\n \"\"\"\n Return the encoding errors list\n \"\"\"\n return self._encoding_errors\n\n\nclass RawDataParticleKey(BaseEnum):\n PAYLOAD = 'raw'\n LENGTH = 'length'\n TYPE = 'type'\n CHECKSUM = 'checksum'\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
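The particle code above stores all timestamps as NTP times produced with ntplib.system_to_ntp_time, and _check_timestamp rejects anything more than roughly a year in the future. A small sketch of that conversion and sanity check follows, assuming the third-party ntplib package (which the original module itself imports) is installed; the helper names are placeholders.

```python
# Illustrative sketch only: the NTP timestamp handling used by the particle code.
# ntplib is the same third-party package the original module imports; the helper
# names here are placeholders.
import time
import ntplib

def to_ntp(unix_time):
    # NTP epoch is 1900, Unix epoch is 1970; ntplib adds the 2208988800-second offset.
    return ntplib.system_to_ntp_time(unix_time)

def timestamp_is_reasonable(ntp_timestamp):
    # Mirrors _check_timestamp: None passes, non-floats fail, and anything more than
    # about one year in the future is rejected as bogus.
    if ntp_timestamp is None:
        return True
    if not isinstance(ntp_timestamp, float):
        return False
    return ntp_timestamp <= ntplib.system_to_ntp_time(time.time() + 86400 * 365)

now_ntp = to_ntp(time.time())
print(now_ntp, timestamp_is_reasonable(now_ntp))
```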
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass DataParticle(object):\n <docstring token>\n <assignment token>\n <function token>\n\n def __eq__(self, arg):\n \"\"\"\n Quick equality check for testing purposes. If they have the same raw\n data, timestamp, they are the same enough for this particle\n \"\"\"\n allowed_diff = 1e-06\n if self._data_particle_type != arg._data_particle_type:\n log.debug('Data particle type does not match: %s %s', self.\n _data_particle_type, arg._data_particle_type)\n return False\n if self.raw_data != arg.raw_data:\n log.debug('Raw data does not match')\n return False\n t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n if t1 is None or t2 is None:\n tdiff = allowed_diff\n else:\n tdiff = abs(t1 - t2)\n if tdiff > allowed_diff:\n log.debug('Timestamp %s does not match %s', t1, t2)\n return False\n generated1 = json.loads(self.generate())\n generated2 = json.loads(arg.generate())\n missing, differing = self._compare(generated1, generated2,\n ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP, DataParticleKey.\n PREFERRED_TIMESTAMP])\n if missing:\n log.error('Key mismatch between particle dictionaries: %r', missing\n )\n return False\n if differing:\n log.error('Value mismatch between particle dictionaries: %r',\n differing)\n return True\n\n @staticmethod\n def _compare(d1, d2, ignore_keys=None):\n ignore_keys = ignore_keys if ignore_keys else []\n missing = set(d1).symmetric_difference(d2)\n differing = {}\n for k in d1:\n if k in ignore_keys or k in missing:\n continue\n if d1[k] != d2[k]:\n differing[k] = d1[k], d2[k]\n return missing, differing\n\n def set_internal_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the internal timestamp\n @param timestamp: NTP timestamp to set\n @param unit_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)\n <function token>\n\n def set_value(self, id, value):\n \"\"\"\n Set a content value, restricted as necessary\n\n @param id The ID of the value to set, should be from DataParticleKey\n @param value The value to set\n @raises ReadOnlyException If the parameter cannot be set\n \"\"\"\n if id == DataParticleKey.INTERNAL_TIMESTAMP and self._check_timestamp(\n value):\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value\n else:\n raise ReadOnlyException(\n 'Parameter %s not able to be set to %s after object creation!'\n % (id, value))\n\n def get_value(self, id):\n \"\"\" Return a stored value from contents\n\n @param id The ID (from DataParticleKey) for the parameter to return\n @raises NotImplementedException If there is an invalid id\n \"\"\"\n if DataParticleKey.has(id):\n return self.contents[id]\n else:\n raise NotImplementedException('Value %s not available in particle!'\n , id)\n\n def get_value_from_values(self, value_id):\n \"\"\" Return a stored value from values list\n\n @param value_id The ID of the parameter to return\n \"\"\"\n if not self._values:\n return None\n values = [i for i in self._values if i[DataParticleKey.VALUE_ID] ==\n value_id]\n if not values:\n return None\n return values[0][DataParticleKey.VALUE]\n\n def 
data_particle_type(self):\n \"\"\"\n Return the data particle type (aka stream name)\n @raise: NotImplementedException if _data_particle_type is not set\n \"\"\"\n if self._data_particle_type is None:\n raise NotImplementedException('_data_particle_type not initialized'\n )\n return self._data_particle_type\n\n def generate_dict(self):\n \"\"\"\n Generate a simple dictionary of sensor data and timestamps, without\n going to JSON. This is useful for the times when JSON is not needed to\n go across an interface. There are times when particles are used\n internally to a component/process/module/etc.\n @retval A python dictionary with the proper timestamps and data values\n @throws InstrumentDriverException if there is a problem wtih the inputs\n \"\"\"\n if not self._check_preferred_timestamps():\n raise SampleException('Preferred timestamp not in particle!')\n self._encoding_errors = []\n if self._values is None:\n self._values = self._build_parsed_values()\n result = self._build_base_structure()\n result[DataParticleKey.STREAM_NAME] = self.data_particle_type()\n result[DataParticleKey.VALUES] = self._values\n return result\n\n def generate(self, sorted=False):\n \"\"\"\n Generates a JSON_parsed packet from a sample dictionary of sensor data and\n associates a timestamp with it\n\n @param sorted Returned sorted json dict, useful for testing, but slow,\n so dont do it unless it is important\n @return A JSON_raw string, properly structured with port agent time stamp\n and driver timestamp\n @throws InstrumentDriverException If there is a problem with the inputs\n \"\"\"\n json_result = json.dumps(self.generate_dict(), sort_keys=sorted)\n return json_result\n <function token>\n <function token>\n\n def _check_timestamp(self, timestamp):\n \"\"\"\n Check to make sure the timestamp is reasonable\n\n @param timestamp An NTP4 formatted timestamp (64bit)\n @return True if timestamp is okay or None, False otherwise\n \"\"\"\n if timestamp is None:\n return True\n if not isinstance(timestamp, float):\n return False\n if timestamp > ntplib.system_to_ntp_time(time.time() + 86400 * 365):\n return False\n else:\n return True\n <function token>\n\n def _encode_value(self, name, value, encoding_function, value_range=None):\n \"\"\"\n Encode a value using the encoding function, if it fails store the error in a queue\n\n :param value_range tuple containing min/max numerical values or min/max lengths\n \"\"\"\n encoded_val = None\n try:\n encoded_val = encoding_function(value)\n except ValueError as e:\n log.error('Unable to convert %s to %s.', encoded_val,\n encoding_function)\n self._encoding_errors.append({name: value})\n except Exception as e:\n log.error(\n 'Data particle error encoding. 
Name: %s Value: %s, Encoding: %s'\n , name, value, encoding_function)\n self._encoding_errors.append({name: value})\n if value_range:\n try:\n vmin, vmax = value_range\n except ValueError as e:\n log.exception(\n '_encode_value must have exactly two values for tuple argument value_range'\n )\n raise ValueError(e)\n if encoding_function in [int, float]:\n if vmin and encoded_val < vmin:\n log.error(\n 'Particle value (%s) below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and encoded_val > vmax:\n log.error(\n 'Particle value (%s) exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n elif hasattr(encoded_val, '__len__'):\n try:\n if vmin and len(encoded_val) < vmin:\n log.error(\n 'Particle value (%s) length below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and len(encoded_val) > vmax:\n log.error(\n 'Particle value (%s) length exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n except TypeError:\n log.warning(\n '_encode_value received an encoding function (%s) that claimed to implement len() but does not. Unable to apply range test to %s'\n , encoding_function, name)\n return {DataParticleKey.VALUE_ID: name, DataParticleKey.VALUE:\n encoded_val}\n\n def get_encoding_errors(self):\n \"\"\"\n Return the encoding errors list\n \"\"\"\n return self._encoding_errors\n\n\nclass RawDataParticleKey(BaseEnum):\n PAYLOAD = 'raw'\n LENGTH = 'length'\n TYPE = 'type'\n CHECKSUM = 'checksum'\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
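_encode_value in the listings above wraps each field conversion in error handling plus an optional min/max (or length) range test. The sketch below shows the same pattern as a standalone function that returns its errors rather than appending to self._encoding_errors; the function name and error format are placeholders.

```python
# Illustrative sketch only: the encode-then-range-check pattern from _encode_value,
# rewritten as a standalone function that returns its errors instead of appending to
# self._encoding_errors. Names and the error format are placeholders.
def encode_value(name, value, encoding_function, value_range=None):
    errors = []
    try:
        encoded = encoding_function(value)
    except Exception:
        errors.append({name: value})
        return {'value_id': name, 'value': None}, errors
    if value_range is not None:
        vmin, vmax = value_range  # must unpack to exactly two values, as in the original
        if encoding_function in (int, float):
            if vmin is not None and encoded < vmin:
                errors.append({name: value})
            elif vmax is not None and encoded > vmax:
                errors.append({name: value})
        elif hasattr(encoded, '__len__'):
            if vmin is not None and len(encoded) < vmin:
                errors.append({name: value})
            elif vmax is not None and len(encoded) > vmax:
                errors.append({name: value})
    return {'value_id': name, 'value': encoded}, errors

print(encode_value('temperature', '12.5', float, value_range=(0, 40)))
print(encode_value('temperature', '99.9', float, value_range=(0, 40)))
```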
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass DataParticle(object):\n <docstring token>\n <assignment token>\n <function token>\n\n def __eq__(self, arg):\n \"\"\"\n Quick equality check for testing purposes. If they have the same raw\n data, timestamp, they are the same enough for this particle\n \"\"\"\n allowed_diff = 1e-06\n if self._data_particle_type != arg._data_particle_type:\n log.debug('Data particle type does not match: %s %s', self.\n _data_particle_type, arg._data_particle_type)\n return False\n if self.raw_data != arg.raw_data:\n log.debug('Raw data does not match')\n return False\n t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n if t1 is None or t2 is None:\n tdiff = allowed_diff\n else:\n tdiff = abs(t1 - t2)\n if tdiff > allowed_diff:\n log.debug('Timestamp %s does not match %s', t1, t2)\n return False\n generated1 = json.loads(self.generate())\n generated2 = json.loads(arg.generate())\n missing, differing = self._compare(generated1, generated2,\n ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP, DataParticleKey.\n PREFERRED_TIMESTAMP])\n if missing:\n log.error('Key mismatch between particle dictionaries: %r', missing\n )\n return False\n if differing:\n log.error('Value mismatch between particle dictionaries: %r',\n differing)\n return True\n\n @staticmethod\n def _compare(d1, d2, ignore_keys=None):\n ignore_keys = ignore_keys if ignore_keys else []\n missing = set(d1).symmetric_difference(d2)\n differing = {}\n for k in d1:\n if k in ignore_keys or k in missing:\n continue\n if d1[k] != d2[k]:\n differing[k] = d1[k], d2[k]\n return missing, differing\n\n def set_internal_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the internal timestamp\n @param timestamp: NTP timestamp to set\n @param unit_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)\n <function token>\n\n def set_value(self, id, value):\n \"\"\"\n Set a content value, restricted as necessary\n\n @param id The ID of the value to set, should be from DataParticleKey\n @param value The value to set\n @raises ReadOnlyException If the parameter cannot be set\n \"\"\"\n if id == DataParticleKey.INTERNAL_TIMESTAMP and self._check_timestamp(\n value):\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value\n else:\n raise ReadOnlyException(\n 'Parameter %s not able to be set to %s after object creation!'\n % (id, value))\n\n def get_value(self, id):\n \"\"\" Return a stored value from contents\n\n @param id The ID (from DataParticleKey) for the parameter to return\n @raises NotImplementedException If there is an invalid id\n \"\"\"\n if DataParticleKey.has(id):\n return self.contents[id]\n else:\n raise NotImplementedException('Value %s not available in particle!'\n , id)\n\n def get_value_from_values(self, value_id):\n \"\"\" Return a stored value from values list\n\n @param value_id The ID of the parameter to return\n \"\"\"\n if not self._values:\n return None\n values = [i for i in self._values if i[DataParticleKey.VALUE_ID] ==\n value_id]\n if not values:\n return None\n return values[0][DataParticleKey.VALUE]\n\n def 
data_particle_type(self):\n \"\"\"\n Return the data particle type (aka stream name)\n @raise: NotImplementedException if _data_particle_type is not set\n \"\"\"\n if self._data_particle_type is None:\n raise NotImplementedException('_data_particle_type not initialized'\n )\n return self._data_particle_type\n\n def generate_dict(self):\n \"\"\"\n Generate a simple dictionary of sensor data and timestamps, without\n going to JSON. This is useful for the times when JSON is not needed to\n go across an interface. There are times when particles are used\n internally to a component/process/module/etc.\n @retval A python dictionary with the proper timestamps and data values\n @throws InstrumentDriverException if there is a problem wtih the inputs\n \"\"\"\n if not self._check_preferred_timestamps():\n raise SampleException('Preferred timestamp not in particle!')\n self._encoding_errors = []\n if self._values is None:\n self._values = self._build_parsed_values()\n result = self._build_base_structure()\n result[DataParticleKey.STREAM_NAME] = self.data_particle_type()\n result[DataParticleKey.VALUES] = self._values\n return result\n\n def generate(self, sorted=False):\n \"\"\"\n Generates a JSON_parsed packet from a sample dictionary of sensor data and\n associates a timestamp with it\n\n @param sorted Returned sorted json dict, useful for testing, but slow,\n so dont do it unless it is important\n @return A JSON_raw string, properly structured with port agent time stamp\n and driver timestamp\n @throws InstrumentDriverException If there is a problem with the inputs\n \"\"\"\n json_result = json.dumps(self.generate_dict(), sort_keys=sorted)\n return json_result\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _encode_value(self, name, value, encoding_function, value_range=None):\n \"\"\"\n Encode a value using the encoding function, if it fails store the error in a queue\n\n :param value_range tuple containing min/max numerical values or min/max lengths\n \"\"\"\n encoded_val = None\n try:\n encoded_val = encoding_function(value)\n except ValueError as e:\n log.error('Unable to convert %s to %s.', encoded_val,\n encoding_function)\n self._encoding_errors.append({name: value})\n except Exception as e:\n log.error(\n 'Data particle error encoding. 
Name: %s Value: %s, Encoding: %s'\n , name, value, encoding_function)\n self._encoding_errors.append({name: value})\n if value_range:\n try:\n vmin, vmax = value_range\n except ValueError as e:\n log.exception(\n '_encode_value must have exactly two values for tuple argument value_range'\n )\n raise ValueError(e)\n if encoding_function in [int, float]:\n if vmin and encoded_val < vmin:\n log.error(\n 'Particle value (%s) below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and encoded_val > vmax:\n log.error(\n 'Particle value (%s) exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n elif hasattr(encoded_val, '__len__'):\n try:\n if vmin and len(encoded_val) < vmin:\n log.error(\n 'Particle value (%s) length below minimum threshold (%s < %s)'\n , name, value, vmin)\n self._encoding_errors.append({name: value})\n elif vmax and len(encoded_val) > vmax:\n log.error(\n 'Particle value (%s) length exceeds maximum threshold (%s > %s)'\n , name, value, vmax)\n self._encoding_errors.append({name: value})\n except TypeError:\n log.warning(\n '_encode_value received an encoding function (%s) that claimed to implement len() but does not. Unable to apply range test to %s'\n , encoding_function, name)\n return {DataParticleKey.VALUE_ID: name, DataParticleKey.VALUE:\n encoded_val}\n\n def get_encoding_errors(self):\n \"\"\"\n Return the encoding errors list\n \"\"\"\n return self._encoding_errors\n\n\nclass RawDataParticleKey(BaseEnum):\n PAYLOAD = 'raw'\n LENGTH = 'length'\n TYPE = 'type'\n CHECKSUM = 'checksum'\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
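generate_dict() in the listings above assembles the particle header (packet format, version, timestamps, quality flag) together with the stream name and values list, and generate() serializes it with json.dumps(..., sort_keys=sorted). The dictionary below sketches that output shape; all key strings and values are illustrative placeholders, since the real spellings come from the DataParticleKey and DataParticleValue enums.

```python
# Illustrative sketch only: the shape of the dictionary generate_dict() assembles and
# generate() serializes. Key strings and values below are placeholders; the real
# spellings come from the DataParticleKey / DataParticleValue enums.
import json
import time

NTP_UNIX_DELTA = 2208988800  # seconds between the NTP (1900) and Unix (1970) epochs

particle_dict = {
    'pkt_format_id': 'JSON_Data',
    'pkt_version': 1,
    'stream_name': 'raw',
    'preferred_timestamp': 'port_timestamp',
    'port_timestamp': 3912345678.0,                     # NTP seconds from the port agent
    'driver_timestamp': time.time() + NTP_UNIX_DELTA,   # driver receipt time, NTP epoch
    'quality_flag': 'ok',
    'values': [{'value_id': 'length', 'value': 3}],
}
print(json.dumps(particle_dict, sort_keys=True))
```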
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass DataParticle(object):\n <docstring token>\n <assignment token>\n <function token>\n\n def __eq__(self, arg):\n \"\"\"\n Quick equality check for testing purposes. If they have the same raw\n data, timestamp, they are the same enough for this particle\n \"\"\"\n allowed_diff = 1e-06\n if self._data_particle_type != arg._data_particle_type:\n log.debug('Data particle type does not match: %s %s', self.\n _data_particle_type, arg._data_particle_type)\n return False\n if self.raw_data != arg.raw_data:\n log.debug('Raw data does not match')\n return False\n t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n if t1 is None or t2 is None:\n tdiff = allowed_diff\n else:\n tdiff = abs(t1 - t2)\n if tdiff > allowed_diff:\n log.debug('Timestamp %s does not match %s', t1, t2)\n return False\n generated1 = json.loads(self.generate())\n generated2 = json.loads(arg.generate())\n missing, differing = self._compare(generated1, generated2,\n ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP, DataParticleKey.\n PREFERRED_TIMESTAMP])\n if missing:\n log.error('Key mismatch between particle dictionaries: %r', missing\n )\n return False\n if differing:\n log.error('Value mismatch between particle dictionaries: %r',\n differing)\n return True\n\n @staticmethod\n def _compare(d1, d2, ignore_keys=None):\n ignore_keys = ignore_keys if ignore_keys else []\n missing = set(d1).symmetric_difference(d2)\n differing = {}\n for k in d1:\n if k in ignore_keys or k in missing:\n continue\n if d1[k] != d2[k]:\n differing[k] = d1[k], d2[k]\n return missing, differing\n\n def set_internal_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the internal timestamp\n @param timestamp: NTP timestamp to set\n @param unit_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)\n <function token>\n\n def set_value(self, id, value):\n \"\"\"\n Set a content value, restricted as necessary\n\n @param id The ID of the value to set, should be from DataParticleKey\n @param value The value to set\n @raises ReadOnlyException If the parameter cannot be set\n \"\"\"\n if id == DataParticleKey.INTERNAL_TIMESTAMP and self._check_timestamp(\n value):\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value\n else:\n raise ReadOnlyException(\n 'Parameter %s not able to be set to %s after object creation!'\n % (id, value))\n\n def get_value(self, id):\n \"\"\" Return a stored value from contents\n\n @param id The ID (from DataParticleKey) for the parameter to return\n @raises NotImplementedException If there is an invalid id\n \"\"\"\n if DataParticleKey.has(id):\n return self.contents[id]\n else:\n raise NotImplementedException('Value %s not available in particle!'\n , id)\n\n def get_value_from_values(self, value_id):\n \"\"\" Return a stored value from values list\n\n @param value_id The ID of the parameter to return\n \"\"\"\n if not self._values:\n return None\n values = [i for i in self._values if i[DataParticleKey.VALUE_ID] ==\n value_id]\n if not values:\n return None\n return values[0][DataParticleKey.VALUE]\n\n def 
data_particle_type(self):\n \"\"\"\n Return the data particle type (aka stream name)\n @raise: NotImplementedException if _data_particle_type is not set\n \"\"\"\n if self._data_particle_type is None:\n raise NotImplementedException('_data_particle_type not initialized'\n )\n return self._data_particle_type\n\n def generate_dict(self):\n \"\"\"\n Generate a simple dictionary of sensor data and timestamps, without\n going to JSON. This is useful for the times when JSON is not needed to\n go across an interface. There are times when particles are used\n internally to a component/process/module/etc.\n @retval A python dictionary with the proper timestamps and data values\n @throws InstrumentDriverException if there is a problem wtih the inputs\n \"\"\"\n if not self._check_preferred_timestamps():\n raise SampleException('Preferred timestamp not in particle!')\n self._encoding_errors = []\n if self._values is None:\n self._values = self._build_parsed_values()\n result = self._build_base_structure()\n result[DataParticleKey.STREAM_NAME] = self.data_particle_type()\n result[DataParticleKey.VALUES] = self._values\n return result\n\n def generate(self, sorted=False):\n \"\"\"\n Generates a JSON_parsed packet from a sample dictionary of sensor data and\n associates a timestamp with it\n\n @param sorted Returned sorted json dict, useful for testing, but slow,\n so dont do it unless it is important\n @return A JSON_raw string, properly structured with port agent time stamp\n and driver timestamp\n @throws InstrumentDriverException If there is a problem with the inputs\n \"\"\"\n json_result = json.dumps(self.generate_dict(), sort_keys=sorted)\n return json_result\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def get_encoding_errors(self):\n \"\"\"\n Return the encoding errors list\n \"\"\"\n return self._encoding_errors\n\n\nclass RawDataParticleKey(BaseEnum):\n PAYLOAD = 'raw'\n LENGTH = 'length'\n TYPE = 'type'\n CHECKSUM = 'checksum'\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. 
missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass DataParticle(object):\n <docstring token>\n <assignment token>\n <function token>\n\n def __eq__(self, arg):\n \"\"\"\n Quick equality check for testing purposes. If they have the same raw\n data, timestamp, they are the same enough for this particle\n \"\"\"\n allowed_diff = 1e-06\n if self._data_particle_type != arg._data_particle_type:\n log.debug('Data particle type does not match: %s %s', self.\n _data_particle_type, arg._data_particle_type)\n return False\n if self.raw_data != arg.raw_data:\n log.debug('Raw data does not match')\n return False\n t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n if t1 is None or t2 is None:\n tdiff = allowed_diff\n else:\n tdiff = abs(t1 - t2)\n if tdiff > allowed_diff:\n log.debug('Timestamp %s does not match %s', t1, t2)\n return False\n generated1 = json.loads(self.generate())\n generated2 = json.loads(arg.generate())\n missing, differing = self._compare(generated1, generated2,\n ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP, DataParticleKey.\n PREFERRED_TIMESTAMP])\n if missing:\n log.error('Key mismatch between particle dictionaries: %r', missing\n )\n return False\n if differing:\n log.error('Value mismatch between particle dictionaries: %r',\n differing)\n return True\n\n @staticmethod\n def _compare(d1, d2, ignore_keys=None):\n ignore_keys = ignore_keys if ignore_keys else []\n missing = set(d1).symmetric_difference(d2)\n differing = {}\n for k in d1:\n if k in ignore_keys or k in missing:\n continue\n if d1[k] != d2[k]:\n differing[k] = d1[k], d2[k]\n return missing, differing\n\n def set_internal_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the internal timestamp\n @param timestamp: NTP timestamp to set\n @param unit_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)\n <function token>\n\n def set_value(self, id, value):\n \"\"\"\n Set a content value, restricted as necessary\n\n @param id The ID of the value to set, should be from DataParticleKey\n @param value The value to set\n @raises ReadOnlyException If the parameter cannot be set\n \"\"\"\n if id == DataParticleKey.INTERNAL_TIMESTAMP and self._check_timestamp(\n value):\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value\n else:\n raise ReadOnlyException(\n 'Parameter %s not able to be set to %s after object creation!'\n % (id, value))\n\n def get_value(self, id):\n \"\"\" Return a stored value from contents\n\n @param id The ID (from DataParticleKey) for the parameter to return\n @raises NotImplementedException If there is an invalid id\n \"\"\"\n if DataParticleKey.has(id):\n return self.contents[id]\n else:\n raise NotImplementedException('Value %s not available in particle!'\n , id)\n\n def get_value_from_values(self, value_id):\n \"\"\" Return a stored value from values list\n\n @param value_id The ID of the parameter to return\n \"\"\"\n if not self._values:\n return None\n values = [i for i in self._values if i[DataParticleKey.VALUE_ID] ==\n value_id]\n if not values:\n return None\n return values[0][DataParticleKey.VALUE]\n\n def 
data_particle_type(self):\n \"\"\"\n Return the data particle type (aka stream name)\n @raise: NotImplementedException if _data_particle_type is not set\n \"\"\"\n if self._data_particle_type is None:\n raise NotImplementedException('_data_particle_type not initialized'\n )\n return self._data_particle_type\n <function token>\n\n def generate(self, sorted=False):\n \"\"\"\n Generates a JSON_parsed packet from a sample dictionary of sensor data and\n associates a timestamp with it\n\n @param sorted Returned sorted json dict, useful for testing, but slow,\n so dont do it unless it is important\n @return A JSON_raw string, properly structured with port agent time stamp\n and driver timestamp\n @throws InstrumentDriverException If there is a problem with the inputs\n \"\"\"\n json_result = json.dumps(self.generate_dict(), sort_keys=sorted)\n return json_result\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def get_encoding_errors(self):\n \"\"\"\n Return the encoding errors list\n \"\"\"\n return self._encoding_errors\n\n\nclass RawDataParticleKey(BaseEnum):\n PAYLOAD = 'raw'\n LENGTH = 'length'\n TYPE = 'type'\n CHECKSUM = 'checksum'\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
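The __eq__ implementation shown in these rows compares two generated particle dictionaries through the _compare helper, ignoring the driver and preferred timestamp keys that legitimately differ between generations. A standalone version of that diff, with made-up sample dictionaries:

```python
# Illustrative sketch only: the dictionary diff behind __eq__ / _compare, which ignores
# timestamp keys that legitimately differ between two generations of the same particle.
# The sample dictionaries are made up.
def compare_dicts(d1, d2, ignore_keys=()):
    missing = set(d1).symmetric_difference(d2)   # keys present in only one dict
    differing = {}
    for k in d1:
        if k in ignore_keys or k in missing:
            continue
        if d1[k] != d2[k]:
            differing[k] = (d1[k], d2[k])
    return missing, differing

a = {'stream_name': 'raw', 'driver_timestamp': 1.0, 'values': [1, 2]}
b = {'stream_name': 'raw', 'driver_timestamp': 2.0, 'values': [1, 3]}
print(compare_dicts(a, b, ignore_keys=('driver_timestamp',)))
```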
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass DataParticle(object):\n <docstring token>\n <assignment token>\n <function token>\n\n def __eq__(self, arg):\n \"\"\"\n Quick equality check for testing purposes. If they have the same raw\n data, timestamp, they are the same enough for this particle\n \"\"\"\n allowed_diff = 1e-06\n if self._data_particle_type != arg._data_particle_type:\n log.debug('Data particle type does not match: %s %s', self.\n _data_particle_type, arg._data_particle_type)\n return False\n if self.raw_data != arg.raw_data:\n log.debug('Raw data does not match')\n return False\n t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n if t1 is None or t2 is None:\n tdiff = allowed_diff\n else:\n tdiff = abs(t1 - t2)\n if tdiff > allowed_diff:\n log.debug('Timestamp %s does not match %s', t1, t2)\n return False\n generated1 = json.loads(self.generate())\n generated2 = json.loads(arg.generate())\n missing, differing = self._compare(generated1, generated2,\n ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP, DataParticleKey.\n PREFERRED_TIMESTAMP])\n if missing:\n log.error('Key mismatch between particle dictionaries: %r', missing\n )\n return False\n if differing:\n log.error('Value mismatch between particle dictionaries: %r',\n differing)\n return True\n\n @staticmethod\n def _compare(d1, d2, ignore_keys=None):\n ignore_keys = ignore_keys if ignore_keys else []\n missing = set(d1).symmetric_difference(d2)\n differing = {}\n for k in d1:\n if k in ignore_keys or k in missing:\n continue\n if d1[k] != d2[k]:\n differing[k] = d1[k], d2[k]\n return missing, differing\n\n def set_internal_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the internal timestamp\n @param timestamp: NTP timestamp to set\n @param unit_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)\n <function token>\n <function token>\n\n def get_value(self, id):\n \"\"\" Return a stored value from contents\n\n @param id The ID (from DataParticleKey) for the parameter to return\n @raises NotImplementedException If there is an invalid id\n \"\"\"\n if DataParticleKey.has(id):\n return self.contents[id]\n else:\n raise NotImplementedException('Value %s not available in particle!'\n , id)\n\n def get_value_from_values(self, value_id):\n \"\"\" Return a stored value from values list\n\n @param value_id The ID of the parameter to return\n \"\"\"\n if not self._values:\n return None\n values = [i for i in self._values if i[DataParticleKey.VALUE_ID] ==\n value_id]\n if not values:\n return None\n return values[0][DataParticleKey.VALUE]\n\n def data_particle_type(self):\n \"\"\"\n Return the data particle type (aka stream name)\n @raise: NotImplementedException if _data_particle_type is not set\n \"\"\"\n if self._data_particle_type is None:\n raise NotImplementedException('_data_particle_type not initialized'\n )\n return self._data_particle_type\n <function token>\n\n def generate(self, sorted=False):\n \"\"\"\n Generates a JSON_parsed packet from a sample dictionary of sensor data and\n associates a timestamp with it\n\n @param sorted Returned 
sorted json dict, useful for testing, but slow,\n so dont do it unless it is important\n @return A JSON_raw string, properly structured with port agent time stamp\n and driver timestamp\n @throws InstrumentDriverException If there is a problem with the inputs\n \"\"\"\n json_result = json.dumps(self.generate_dict(), sort_keys=sorted)\n return json_result\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def get_encoding_errors(self):\n \"\"\"\n Return the encoding errors list\n \"\"\"\n return self._encoding_errors\n\n\nclass RawDataParticleKey(BaseEnum):\n PAYLOAD = 'raw'\n LENGTH = 'length'\n TYPE = 'type'\n CHECKSUM = 'checksum'\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass DataParticle(object):\n <docstring token>\n <assignment token>\n <function token>\n\n def __eq__(self, arg):\n \"\"\"\n Quick equality check for testing purposes. If they have the same raw\n data, timestamp, they are the same enough for this particle\n \"\"\"\n allowed_diff = 1e-06\n if self._data_particle_type != arg._data_particle_type:\n log.debug('Data particle type does not match: %s %s', self.\n _data_particle_type, arg._data_particle_type)\n return False\n if self.raw_data != arg.raw_data:\n log.debug('Raw data does not match')\n return False\n t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n if t1 is None or t2 is None:\n tdiff = allowed_diff\n else:\n tdiff = abs(t1 - t2)\n if tdiff > allowed_diff:\n log.debug('Timestamp %s does not match %s', t1, t2)\n return False\n generated1 = json.loads(self.generate())\n generated2 = json.loads(arg.generate())\n missing, differing = self._compare(generated1, generated2,\n ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP, DataParticleKey.\n PREFERRED_TIMESTAMP])\n if missing:\n log.error('Key mismatch between particle dictionaries: %r', missing\n )\n return False\n if differing:\n log.error('Value mismatch between particle dictionaries: %r',\n differing)\n return True\n\n @staticmethod\n def _compare(d1, d2, ignore_keys=None):\n ignore_keys = ignore_keys if ignore_keys else []\n missing = set(d1).symmetric_difference(d2)\n differing = {}\n for k in d1:\n if k in ignore_keys or k in missing:\n continue\n if d1[k] != d2[k]:\n differing[k] = d1[k], d2[k]\n return missing, differing\n\n def set_internal_timestamp(self, timestamp=None, unix_time=None):\n \"\"\"\n Set the internal timestamp\n @param timestamp: NTP timestamp to set\n @param unit_time: Unix time as returned from time.time()\n @raise InstrumentParameterException if timestamp or unix_time not supplied\n \"\"\"\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\n 'timestamp or unix_time required')\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)\n <function token>\n <function token>\n\n def get_value(self, id):\n \"\"\" Return a stored value from contents\n\n @param id The ID (from DataParticleKey) for the parameter to return\n @raises NotImplementedException If there is an invalid id\n \"\"\"\n if DataParticleKey.has(id):\n return self.contents[id]\n else:\n raise NotImplementedException('Value %s not available in particle!'\n , id)\n\n def get_value_from_values(self, value_id):\n \"\"\" Return a stored value from values list\n\n @param value_id The ID of the parameter to return\n \"\"\"\n if not self._values:\n return None\n values = [i for i in self._values if i[DataParticleKey.VALUE_ID] ==\n value_id]\n if not values:\n return None\n return values[0][DataParticleKey.VALUE]\n <function token>\n <function token>\n\n def generate(self, sorted=False):\n \"\"\"\n Generates a JSON_parsed packet from a sample dictionary of sensor data and\n associates a timestamp with it\n\n @param sorted Returned sorted json dict, useful for testing, but slow,\n so dont do it unless it is important\n @return A JSON_raw string, properly structured with port agent time stamp\n and driver timestamp\n @throws InstrumentDriverException If there is a problem with the inputs\n \"\"\"\n json_result = 
json.dumps(self.generate_dict(), sort_keys=sorted)\n return json_result\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def get_encoding_errors(self):\n \"\"\"\n Return the encoding errors list\n \"\"\"\n return self._encoding_errors\n\n\nclass RawDataParticleKey(BaseEnum):\n PAYLOAD = 'raw'\n LENGTH = 'length'\n TYPE = 'type'\n CHECKSUM = 'checksum'\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass DataParticle(object):\n <docstring token>\n <assignment token>\n <function token>\n\n def __eq__(self, arg):\n \"\"\"\n Quick equality check for testing purposes. If they have the same raw\n data, timestamp, they are the same enough for this particle\n \"\"\"\n allowed_diff = 1e-06\n if self._data_particle_type != arg._data_particle_type:\n log.debug('Data particle type does not match: %s %s', self.\n _data_particle_type, arg._data_particle_type)\n return False\n if self.raw_data != arg.raw_data:\n log.debug('Raw data does not match')\n return False\n t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n if t1 is None or t2 is None:\n tdiff = allowed_diff\n else:\n tdiff = abs(t1 - t2)\n if tdiff > allowed_diff:\n log.debug('Timestamp %s does not match %s', t1, t2)\n return False\n generated1 = json.loads(self.generate())\n generated2 = json.loads(arg.generate())\n missing, differing = self._compare(generated1, generated2,\n ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP, DataParticleKey.\n PREFERRED_TIMESTAMP])\n if missing:\n log.error('Key mismatch between particle dictionaries: %r', missing\n )\n return False\n if differing:\n log.error('Value mismatch between particle dictionaries: %r',\n differing)\n return True\n\n @staticmethod\n def _compare(d1, d2, ignore_keys=None):\n ignore_keys = ignore_keys if ignore_keys else []\n missing = set(d1).symmetric_difference(d2)\n differing = {}\n for k in d1:\n if k in ignore_keys or k in missing:\n continue\n if d1[k] != d2[k]:\n differing[k] = d1[k], d2[k]\n return missing, differing\n <function token>\n <function token>\n <function token>\n\n def get_value(self, id):\n \"\"\" Return a stored value from contents\n\n @param id The ID (from DataParticleKey) for the parameter to return\n @raises NotImplementedException If there is an invalid id\n \"\"\"\n if DataParticleKey.has(id):\n return self.contents[id]\n else:\n raise NotImplementedException('Value %s not available in particle!'\n , id)\n\n def get_value_from_values(self, value_id):\n \"\"\" Return a stored value from values list\n\n @param value_id The ID of the parameter to return\n \"\"\"\n if not self._values:\n return None\n values = [i for i in self._values if i[DataParticleKey.VALUE_ID] ==\n value_id]\n if not values:\n return None\n return values[0][DataParticleKey.VALUE]\n <function token>\n <function token>\n\n def generate(self, sorted=False):\n \"\"\"\n Generates a JSON_parsed packet from a sample dictionary of sensor data and\n associates a timestamp with it\n\n @param sorted Returned sorted json dict, useful for testing, but slow,\n so dont do it unless it is important\n @return A JSON_raw string, properly structured with port agent time stamp\n and driver timestamp\n @throws InstrumentDriverException If there is a problem with the inputs\n \"\"\"\n json_result = json.dumps(self.generate_dict(), sort_keys=sorted)\n return json_result\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def get_encoding_errors(self):\n \"\"\"\n Return the encoding errors list\n \"\"\"\n return self._encoding_errors\n\n\nclass RawDataParticleKey(BaseEnum):\n PAYLOAD = 'raw'\n LENGTH = 'length'\n TYPE = 'type'\n CHECKSUM = 'checksum'\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is 
a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass DataParticle(object):\n <docstring token>\n <assignment token>\n <function token>\n\n def __eq__(self, arg):\n \"\"\"\n Quick equality check for testing purposes. If they have the same raw\n data, timestamp, they are the same enough for this particle\n \"\"\"\n allowed_diff = 1e-06\n if self._data_particle_type != arg._data_particle_type:\n log.debug('Data particle type does not match: %s %s', self.\n _data_particle_type, arg._data_particle_type)\n return False\n if self.raw_data != arg.raw_data:\n log.debug('Raw data does not match')\n return False\n t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n if t1 is None or t2 is None:\n tdiff = allowed_diff\n else:\n tdiff = abs(t1 - t2)\n if tdiff > allowed_diff:\n log.debug('Timestamp %s does not match %s', t1, t2)\n return False\n generated1 = json.loads(self.generate())\n generated2 = json.loads(arg.generate())\n missing, differing = self._compare(generated1, generated2,\n ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP, DataParticleKey.\n PREFERRED_TIMESTAMP])\n if missing:\n log.error('Key mismatch between particle dictionaries: %r', missing\n )\n return False\n if differing:\n log.error('Value mismatch between particle dictionaries: %r',\n differing)\n return True\n\n @staticmethod\n def _compare(d1, d2, ignore_keys=None):\n ignore_keys = ignore_keys if ignore_keys else []\n missing = set(d1).symmetric_difference(d2)\n differing = {}\n for k in d1:\n if k in ignore_keys or k in missing:\n continue\n if d1[k] != d2[k]:\n differing[k] = d1[k], d2[k]\n return missing, differing\n <function token>\n <function token>\n <function token>\n <function token>\n\n def get_value_from_values(self, value_id):\n \"\"\" Return a stored value from values list\n\n @param value_id The ID of the parameter to return\n \"\"\"\n if not self._values:\n return None\n values = [i for i in self._values if i[DataParticleKey.VALUE_ID] ==\n value_id]\n if not values:\n return None\n return values[0][DataParticleKey.VALUE]\n <function token>\n <function token>\n\n def generate(self, sorted=False):\n \"\"\"\n Generates a JSON_parsed packet from a sample dictionary of sensor data and\n associates a timestamp with it\n\n @param sorted Returned sorted json dict, useful for testing, but slow,\n so dont do it unless it is important\n @return A JSON_raw string, properly structured with port agent time stamp\n and driver timestamp\n @throws InstrumentDriverException If there is a problem with the inputs\n \"\"\"\n json_result = json.dumps(self.generate_dict(), sort_keys=sorted)\n return json_result\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def get_encoding_errors(self):\n \"\"\"\n Return the encoding errors list\n \"\"\"\n return self._encoding_errors\n\n\nclass RawDataParticleKey(BaseEnum):\n PAYLOAD = 'raw'\n LENGTH = 'length'\n TYPE = 'type'\n CHECKSUM = 'checksum'\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n 
if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass DataParticle(object):\n <docstring token>\n <assignment token>\n <function token>\n\n def __eq__(self, arg):\n \"\"\"\n Quick equality check for testing purposes. If they have the same raw\n data, timestamp, they are the same enough for this particle\n \"\"\"\n allowed_diff = 1e-06\n if self._data_particle_type != arg._data_particle_type:\n log.debug('Data particle type does not match: %s %s', self.\n _data_particle_type, arg._data_particle_type)\n return False\n if self.raw_data != arg.raw_data:\n log.debug('Raw data does not match')\n return False\n t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n if t1 is None or t2 is None:\n tdiff = allowed_diff\n else:\n tdiff = abs(t1 - t2)\n if tdiff > allowed_diff:\n log.debug('Timestamp %s does not match %s', t1, t2)\n return False\n generated1 = json.loads(self.generate())\n generated2 = json.loads(arg.generate())\n missing, differing = self._compare(generated1, generated2,\n ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP, DataParticleKey.\n PREFERRED_TIMESTAMP])\n if missing:\n log.error('Key mismatch between particle dictionaries: %r', missing\n )\n return False\n if differing:\n log.error('Value mismatch between particle dictionaries: %r',\n differing)\n return True\n\n @staticmethod\n def _compare(d1, d2, ignore_keys=None):\n ignore_keys = ignore_keys if ignore_keys else []\n missing = set(d1).symmetric_difference(d2)\n differing = {}\n for k in d1:\n if k in ignore_keys or k in missing:\n continue\n if d1[k] != d2[k]:\n differing[k] = d1[k], d2[k]\n return missing, differing\n <function token>\n <function token>\n <function token>\n <function token>\n\n def get_value_from_values(self, value_id):\n \"\"\" Return a stored value from values list\n\n @param value_id The ID of the parameter to return\n \"\"\"\n if not self._values:\n return None\n values = [i for i in self._values if i[DataParticleKey.VALUE_ID] ==\n value_id]\n if not values:\n return None\n return values[0][DataParticleKey.VALUE]\n <function token>\n <function token>\n\n def generate(self, sorted=False):\n \"\"\"\n Generates a JSON_parsed packet from a sample dictionary of sensor data and\n associates a timestamp with it\n\n @param sorted Returned sorted json dict, useful for testing, but slow,\n so dont do it unless it is important\n @return A JSON_raw string, properly structured with port agent time stamp\n and driver timestamp\n @throws InstrumentDriverException If there is a problem with the inputs\n \"\"\"\n json_result = json.dumps(self.generate_dict(), sort_keys=sorted)\n return json_result\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass RawDataParticleKey(BaseEnum):\n PAYLOAD = 'raw'\n LENGTH = 'length'\n TYPE = 'type'\n CHECKSUM = 'checksum'\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n 
for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass DataParticle(object):\n <docstring token>\n <assignment token>\n <function token>\n\n def __eq__(self, arg):\n \"\"\"\n Quick equality check for testing purposes. If they have the same raw\n data, timestamp, they are the same enough for this particle\n \"\"\"\n allowed_diff = 1e-06\n if self._data_particle_type != arg._data_particle_type:\n log.debug('Data particle type does not match: %s %s', self.\n _data_particle_type, arg._data_particle_type)\n return False\n if self.raw_data != arg.raw_data:\n log.debug('Raw data does not match')\n return False\n t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]\n if t1 is None or t2 is None:\n tdiff = allowed_diff\n else:\n tdiff = abs(t1 - t2)\n if tdiff > allowed_diff:\n log.debug('Timestamp %s does not match %s', t1, t2)\n return False\n generated1 = json.loads(self.generate())\n generated2 = json.loads(arg.generate())\n missing, differing = self._compare(generated1, generated2,\n ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP, DataParticleKey.\n PREFERRED_TIMESTAMP])\n if missing:\n log.error('Key mismatch between particle dictionaries: %r', missing\n )\n return False\n if differing:\n log.error('Value mismatch between particle dictionaries: %r',\n differing)\n return True\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def get_value_from_values(self, value_id):\n \"\"\" Return a stored value from values list\n\n @param value_id The ID of the parameter to return\n \"\"\"\n if not self._values:\n return None\n values = [i for i in self._values if i[DataParticleKey.VALUE_ID] ==\n value_id]\n if not values:\n return None\n return values[0][DataParticleKey.VALUE]\n <function token>\n <function token>\n\n def generate(self, sorted=False):\n \"\"\"\n Generates a JSON_parsed packet from a sample dictionary of sensor data and\n associates a timestamp with it\n\n @param sorted Returned sorted json dict, useful for testing, but slow,\n so dont do it unless it is important\n @return A JSON_raw string, properly structured with port agent time stamp\n and driver timestamp\n @throws InstrumentDriverException If there is a problem with the inputs\n \"\"\"\n json_result = json.dumps(self.generate_dict(), sort_keys=sorted)\n return json_result\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass RawDataParticleKey(BaseEnum):\n PAYLOAD = 'raw'\n LENGTH = 'length'\n TYPE = 'type'\n CHECKSUM = 'checksum'\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. 
missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass DataParticle(object):\n <docstring token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def get_value_from_values(self, value_id):\n \"\"\" Return a stored value from values list\n\n @param value_id The ID of the parameter to return\n \"\"\"\n if not self._values:\n return None\n values = [i for i in self._values if i[DataParticleKey.VALUE_ID] ==\n value_id]\n if not values:\n return None\n return values[0][DataParticleKey.VALUE]\n <function token>\n <function token>\n\n def generate(self, sorted=False):\n \"\"\"\n Generates a JSON_parsed packet from a sample dictionary of sensor data and\n associates a timestamp with it\n\n @param sorted Returned sorted json dict, useful for testing, but slow,\n so dont do it unless it is important\n @return A JSON_raw string, properly structured with port agent time stamp\n and driver timestamp\n @throws InstrumentDriverException If there is a problem with the inputs\n \"\"\"\n json_result = json.dumps(self.generate_dict(), sort_keys=sorted)\n return json_result\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass RawDataParticleKey(BaseEnum):\n PAYLOAD = 'raw'\n LENGTH = 'length'\n TYPE = 'type'\n CHECKSUM = 'checksum'\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass DataParticle(object):\n <docstring token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def get_value_from_values(self, value_id):\n \"\"\" Return a stored value from values list\n\n @param value_id The ID of the parameter to return\n \"\"\"\n if not self._values:\n return None\n values = [i for i in self._values if i[DataParticleKey.VALUE_ID] ==\n value_id]\n if not values:\n return None\n return values[0][DataParticleKey.VALUE]\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass RawDataParticleKey(BaseEnum):\n PAYLOAD = 'raw'\n LENGTH = 'length'\n TYPE = 'type'\n CHECKSUM = 'checksum'\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass DataParticle(object):\n <docstring token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass RawDataParticleKey(BaseEnum):\n PAYLOAD = 'raw'\n LENGTH = 'length'\n TYPE = 'type'\n CHECKSUM = 'checksum'\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RawDataParticleKey(BaseEnum):\n PAYLOAD = 'raw'\n LENGTH = 'length'\n TYPE = 'type'\n CHECKSUM = 'checksum'\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RawDataParticleKey(BaseEnum):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RawDataParticle(DataParticle):\n \"\"\"\n This class a common data particle for generating data particles of raw\n data.\n\n It essentially is a translation of the port agent packet\n \"\"\"\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RawDataParticle(DataParticle):\n <docstring token>\n _data_particle_type = CommonDataParticleType.RAW\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RawDataParticle(DataParticle):\n <docstring token>\n <assignment token>\n\n def _build_parsed_values(self):\n \"\"\"\n Build a particle out of a port agent packet.\n @returns A list that is ready to be added to the \"values\" tag before\n the structure is JSONified\n \"\"\"\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException('raw data not a dictionary')\n for param in ['raw', 'length', 'type', 'checksum']:\n if param not in port_agent_packet:\n raise SampleException(\n 'raw data not a complete port agent packet. missing %s' %\n param)\n payload = None\n length = None\n type = None\n checksum = None\n try:\n payload = base64.b64encode(port_agent_packet.get('raw'))\n except TypeError:\n pass\n try:\n length = int(port_agent_packet.get('length'))\n except TypeError:\n pass\n try:\n type = int(port_agent_packet.get('type'))\n except TypeError:\n pass\n try:\n checksum = int(port_agent_packet.get('checksum'))\n except TypeError:\n pass\n result = [{DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload, DataParticleKey.BINARY: True},\n {DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length}, {DataParticleKey.VALUE_ID:\n RawDataParticleKey.TYPE, DataParticleKey.VALUE: type}, {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum}]\n return result\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RawDataParticle(DataParticle):\n <docstring token>\n <assignment token>\n <function token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n"
] | false |
98,642 |
104881b9f4dc1050f922fce4db96978eb5b06c38
|
from __future__ import print_function, division
from functools import partial
from nose.tools import assert_list_equal, assert_equal
from compiler import compile, Code, Loop
lift_str = partial(map, str)
def test_str_identity_holds_after_compile():
test_input = [
'+++--->>>.....<',
'+++---[>.<]...<',
'+++[-+.][>+.]>....<',
'[-+.][>+.]>....<',
'+++[-+.][>+.]',
'+++[-+[++].][>+.]',
'[[[[[[[[++]]]]]]]]',
'[>[>[>[>[>[>[>[++]]]]]]]]'
]
assert_list_equal(
lift_str(map(compile, test_input)),
lift_str(test_input),
)
def test_count():
def assert_compile_count_equal(code, count):
assert_equal(compile(code).n_splits(), count)
programs, counts = zip(
*[
('', 1),
('+', 2),
('++', 3),
('+++', 4),
('[]', 3),
('[][]', 5),
('[][][]', 7),
('[+]', 4),
('[+][+]', 7),
('+[+]', 5),
('+[+]+', 6),
('+[++]+', 7),
('[[]]', 5),
('[[[]]]', 7),
('[+[[]]]', 8),
('[+[+[]]]', 9),
]
)
for program, count in zip(programs, counts):
yield assert_compile_count_equal, program, count
|
[
"from __future__ import print_function, division\n\nfrom functools import partial\nfrom nose.tools import assert_list_equal, assert_equal\n\nfrom compiler import compile, Code, Loop\n\n\nlift_str = partial(map, str)\n\n\ndef test_str_identity_holds_after_compile():\n test_input = [\n '+++--->>>.....<',\n '+++---[>.<]...<',\n '+++[-+.][>+.]>....<',\n '[-+.][>+.]>....<',\n '+++[-+.][>+.]',\n '+++[-+[++].][>+.]',\n '[[[[[[[[++]]]]]]]]',\n '[>[>[>[>[>[>[>[++]]]]]]]]'\n ]\n\n assert_list_equal(\n lift_str(map(compile, test_input)),\n lift_str(test_input),\n )\n\ndef test_count():\n def assert_compile_count_equal(code, count):\n assert_equal(compile(code).n_splits(), count)\n\n programs, counts = zip(\n *[\n ('', 1),\n ('+', 2),\n ('++', 3),\n ('+++', 4),\n ('[]', 3),\n ('[][]', 5),\n ('[][][]', 7),\n ('[+]', 4),\n ('[+][+]', 7),\n ('+[+]', 5),\n ('+[+]+', 6),\n ('+[++]+', 7),\n ('[[]]', 5),\n ('[[[]]]', 7),\n ('[+[[]]]', 8),\n ('[+[+[]]]', 9),\n ]\n )\n\n for program, count in zip(programs, counts):\n yield assert_compile_count_equal, program, count\n",
"from __future__ import print_function, division\nfrom functools import partial\nfrom nose.tools import assert_list_equal, assert_equal\nfrom compiler import compile, Code, Loop\nlift_str = partial(map, str)\n\n\ndef test_str_identity_holds_after_compile():\n test_input = ['+++--->>>.....<', '+++---[>.<]...<',\n '+++[-+.][>+.]>....<', '[-+.][>+.]>....<', '+++[-+.][>+.]',\n '+++[-+[++].][>+.]', '[[[[[[[[++]]]]]]]]', '[>[>[>[>[>[>[>[++]]]]]]]]']\n assert_list_equal(lift_str(map(compile, test_input)), lift_str(test_input))\n\n\ndef test_count():\n\n def assert_compile_count_equal(code, count):\n assert_equal(compile(code).n_splits(), count)\n programs, counts = zip(*[('', 1), ('+', 2), ('++', 3), ('+++', 4), (\n '[]', 3), ('[][]', 5), ('[][][]', 7), ('[+]', 4), ('[+][+]', 7), (\n '+[+]', 5), ('+[+]+', 6), ('+[++]+', 7), ('[[]]', 5), ('[[[]]]', 7),\n ('[+[[]]]', 8), ('[+[+[]]]', 9)])\n for program, count in zip(programs, counts):\n yield assert_compile_count_equal, program, count\n",
"<import token>\nlift_str = partial(map, str)\n\n\ndef test_str_identity_holds_after_compile():\n test_input = ['+++--->>>.....<', '+++---[>.<]...<',\n '+++[-+.][>+.]>....<', '[-+.][>+.]>....<', '+++[-+.][>+.]',\n '+++[-+[++].][>+.]', '[[[[[[[[++]]]]]]]]', '[>[>[>[>[>[>[>[++]]]]]]]]']\n assert_list_equal(lift_str(map(compile, test_input)), lift_str(test_input))\n\n\ndef test_count():\n\n def assert_compile_count_equal(code, count):\n assert_equal(compile(code).n_splits(), count)\n programs, counts = zip(*[('', 1), ('+', 2), ('++', 3), ('+++', 4), (\n '[]', 3), ('[][]', 5), ('[][][]', 7), ('[+]', 4), ('[+][+]', 7), (\n '+[+]', 5), ('+[+]+', 6), ('+[++]+', 7), ('[[]]', 5), ('[[[]]]', 7),\n ('[+[[]]]', 8), ('[+[+[]]]', 9)])\n for program, count in zip(programs, counts):\n yield assert_compile_count_equal, program, count\n",
"<import token>\n<assignment token>\n\n\ndef test_str_identity_holds_after_compile():\n test_input = ['+++--->>>.....<', '+++---[>.<]...<',\n '+++[-+.][>+.]>....<', '[-+.][>+.]>....<', '+++[-+.][>+.]',\n '+++[-+[++].][>+.]', '[[[[[[[[++]]]]]]]]', '[>[>[>[>[>[>[>[++]]]]]]]]']\n assert_list_equal(lift_str(map(compile, test_input)), lift_str(test_input))\n\n\ndef test_count():\n\n def assert_compile_count_equal(code, count):\n assert_equal(compile(code).n_splits(), count)\n programs, counts = zip(*[('', 1), ('+', 2), ('++', 3), ('+++', 4), (\n '[]', 3), ('[][]', 5), ('[][][]', 7), ('[+]', 4), ('[+][+]', 7), (\n '+[+]', 5), ('+[+]+', 6), ('+[++]+', 7), ('[[]]', 5), ('[[[]]]', 7),\n ('[+[[]]]', 8), ('[+[+[]]]', 9)])\n for program, count in zip(programs, counts):\n yield assert_compile_count_equal, program, count\n",
"<import token>\n<assignment token>\n<function token>\n\n\ndef test_count():\n\n def assert_compile_count_equal(code, count):\n assert_equal(compile(code).n_splits(), count)\n programs, counts = zip(*[('', 1), ('+', 2), ('++', 3), ('+++', 4), (\n '[]', 3), ('[][]', 5), ('[][][]', 7), ('[+]', 4), ('[+][+]', 7), (\n '+[+]', 5), ('+[+]+', 6), ('+[++]+', 7), ('[[]]', 5), ('[[[]]]', 7),\n ('[+[[]]]', 8), ('[+[+[]]]', 9)])\n for program, count in zip(programs, counts):\n yield assert_compile_count_equal, program, count\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n"
] | false |
98,643 |
14ca4658a0b7e9f2a77de29cb2a12cab4ac90edd
|
# -*- coding: utf-8 -*-
import os
import datetime
import logging
import re
import config
from classes import *
from detalhe_clube import DetalheClube
from lib import gera_icc_para_jogo
from lib.detalhe_icc import DetalheICC
class DetalheClubeIndices(DetalheClube):
# memcache vars
cache_namespace = "detalhe_clube_indices"
render_this_page_without_main = True
def get(self):
self.decontaminate_vars()
self.checkCacheFreshen()
self.requestHandler()
return
def renderDados(self):
# obter a lista de lances ordenadinhos
lista_lances = Lance.gql("WHERE lan_epoca = :1 and lan_clubes = :2 ORDER by lan_data, lan_nome", self.epoca, self.clube.key()).fetch(1000)
# obter a fonte dos lances -- usar a época toda!
acu_jornadas = {}
acumuladores = AcumuladorJornada.all().filter("acuj_epoca = ", self.epoca).filter("acuj_versao = ", config.VERSAO_ACUMULADOR)
for acu in acumuladores:
acu_jornadas[acu.acuj_jornada.jor_nome] = acu.acuj_content
detalhe_icc = DetalheICC()
detalhe_icc.setLances(lista_lances)
detalhe_icc.setClube(self.clube)
detalhe_icc.setAcumuladoresJornadas(acu_jornadas)
resultados = detalhe_icc.gera()
jogos = resultados["jogos"]
total_icc_beneficio = resultados["total_icc_beneficio"]
total_icc_prejuizo = resultados["total_icc_prejuizo"]
total_icc = (total_icc_beneficio + total_icc_prejuizo)
return {"jogos":jogos, "total_icc_beneficio":total_icc_beneficio,
"total_icc_prejuizo":total_icc_prejuizo, "total_icc":total_icc}
def renderHTML(self):
html = self.render_subdir('clube','detalhe_clube_indices.html', {
"detalhe_icc_dados":self.dados,
"clube":self.clube,
"epoca":self.epoca,
"data":datetime.datetime.now()
})
return html
|
[
"# -*- coding: utf-8 -*-\n\nimport os\nimport datetime\nimport logging\nimport re\nimport config \n\nfrom classes import *\nfrom detalhe_clube import DetalheClube\nfrom lib import gera_icc_para_jogo\nfrom lib.detalhe_icc import DetalheICC\n\nclass DetalheClubeIndices(DetalheClube):\n\t\t\n\t# memcache vars\n\tcache_namespace = \"detalhe_clube_indices\"\n\trender_this_page_without_main = True\n\n\tdef get(self):\n\t\tself.decontaminate_vars()\n\t\tself.checkCacheFreshen()\n\t\tself.requestHandler()\n\t\treturn \n\n\tdef renderDados(self):\n\t\t\n\t\t# obter a lista de lances ordenadinhos\n\t\tlista_lances = Lance.gql(\"WHERE lan_epoca = :1 and lan_clubes = :2 ORDER by lan_data, lan_nome\", self.epoca, self.clube.key()).fetch(1000)\n\n\t\t# obter a fonte dos lances -- usar a época toda!\n\t\tacu_jornadas = {}\n\t\tacumuladores = AcumuladorJornada.all().filter(\"acuj_epoca = \", self.epoca).filter(\"acuj_versao = \", config.VERSAO_ACUMULADOR)\n\t\tfor acu in acumuladores:\n\t\t\tacu_jornadas[acu.acuj_jornada.jor_nome] = acu.acuj_content\n\t\t\n\t\tdetalhe_icc = DetalheICC()\n\t\tdetalhe_icc.setLances(lista_lances)\n\t\tdetalhe_icc.setClube(self.clube)\n\t\tdetalhe_icc.setAcumuladoresJornadas(acu_jornadas)\n\t\tresultados = detalhe_icc.gera()\n\t\t\n\t\tjogos = resultados[\"jogos\"]\n\t\ttotal_icc_beneficio = resultados[\"total_icc_beneficio\"]\n\t\ttotal_icc_prejuizo = resultados[\"total_icc_prejuizo\"]\n\t\ttotal_icc = (total_icc_beneficio + total_icc_prejuizo)\n\t\t\n\t\treturn {\"jogos\":jogos, \"total_icc_beneficio\":total_icc_beneficio, \n\t\t\t\"total_icc_prejuizo\":total_icc_prejuizo, \"total_icc\":total_icc}\n\t\t\n\tdef renderHTML(self):\n\t\t\n\t\thtml = self.render_subdir('clube','detalhe_clube_indices.html', {\n\t\t\t\"detalhe_icc_dados\":self.dados,\n\t\t\t\"clube\":self.clube,\n\t\t\t\"epoca\":self.epoca,\n\t\t\t\"data\":datetime.datetime.now()\n\t\t})\n\t\n\t\treturn html",
"import os\nimport datetime\nimport logging\nimport re\nimport config\nfrom classes import *\nfrom detalhe_clube import DetalheClube\nfrom lib import gera_icc_para_jogo\nfrom lib.detalhe_icc import DetalheICC\n\n\nclass DetalheClubeIndices(DetalheClube):\n cache_namespace = 'detalhe_clube_indices'\n render_this_page_without_main = True\n\n def get(self):\n self.decontaminate_vars()\n self.checkCacheFreshen()\n self.requestHandler()\n return\n\n def renderDados(self):\n lista_lances = Lance.gql(\n 'WHERE lan_epoca = :1 and lan_clubes = :2 ORDER by lan_data, lan_nome'\n , self.epoca, self.clube.key()).fetch(1000)\n acu_jornadas = {}\n acumuladores = AcumuladorJornada.all().filter('acuj_epoca = ', self\n .epoca).filter('acuj_versao = ', config.VERSAO_ACUMULADOR)\n for acu in acumuladores:\n acu_jornadas[acu.acuj_jornada.jor_nome] = acu.acuj_content\n detalhe_icc = DetalheICC()\n detalhe_icc.setLances(lista_lances)\n detalhe_icc.setClube(self.clube)\n detalhe_icc.setAcumuladoresJornadas(acu_jornadas)\n resultados = detalhe_icc.gera()\n jogos = resultados['jogos']\n total_icc_beneficio = resultados['total_icc_beneficio']\n total_icc_prejuizo = resultados['total_icc_prejuizo']\n total_icc = total_icc_beneficio + total_icc_prejuizo\n return {'jogos': jogos, 'total_icc_beneficio': total_icc_beneficio,\n 'total_icc_prejuizo': total_icc_prejuizo, 'total_icc': total_icc}\n\n def renderHTML(self):\n html = self.render_subdir('clube', 'detalhe_clube_indices.html', {\n 'detalhe_icc_dados': self.dados, 'clube': self.clube, 'epoca':\n self.epoca, 'data': datetime.datetime.now()})\n return html\n",
"<import token>\n\n\nclass DetalheClubeIndices(DetalheClube):\n cache_namespace = 'detalhe_clube_indices'\n render_this_page_without_main = True\n\n def get(self):\n self.decontaminate_vars()\n self.checkCacheFreshen()\n self.requestHandler()\n return\n\n def renderDados(self):\n lista_lances = Lance.gql(\n 'WHERE lan_epoca = :1 and lan_clubes = :2 ORDER by lan_data, lan_nome'\n , self.epoca, self.clube.key()).fetch(1000)\n acu_jornadas = {}\n acumuladores = AcumuladorJornada.all().filter('acuj_epoca = ', self\n .epoca).filter('acuj_versao = ', config.VERSAO_ACUMULADOR)\n for acu in acumuladores:\n acu_jornadas[acu.acuj_jornada.jor_nome] = acu.acuj_content\n detalhe_icc = DetalheICC()\n detalhe_icc.setLances(lista_lances)\n detalhe_icc.setClube(self.clube)\n detalhe_icc.setAcumuladoresJornadas(acu_jornadas)\n resultados = detalhe_icc.gera()\n jogos = resultados['jogos']\n total_icc_beneficio = resultados['total_icc_beneficio']\n total_icc_prejuizo = resultados['total_icc_prejuizo']\n total_icc = total_icc_beneficio + total_icc_prejuizo\n return {'jogos': jogos, 'total_icc_beneficio': total_icc_beneficio,\n 'total_icc_prejuizo': total_icc_prejuizo, 'total_icc': total_icc}\n\n def renderHTML(self):\n html = self.render_subdir('clube', 'detalhe_clube_indices.html', {\n 'detalhe_icc_dados': self.dados, 'clube': self.clube, 'epoca':\n self.epoca, 'data': datetime.datetime.now()})\n return html\n",
"<import token>\n\n\nclass DetalheClubeIndices(DetalheClube):\n <assignment token>\n <assignment token>\n\n def get(self):\n self.decontaminate_vars()\n self.checkCacheFreshen()\n self.requestHandler()\n return\n\n def renderDados(self):\n lista_lances = Lance.gql(\n 'WHERE lan_epoca = :1 and lan_clubes = :2 ORDER by lan_data, lan_nome'\n , self.epoca, self.clube.key()).fetch(1000)\n acu_jornadas = {}\n acumuladores = AcumuladorJornada.all().filter('acuj_epoca = ', self\n .epoca).filter('acuj_versao = ', config.VERSAO_ACUMULADOR)\n for acu in acumuladores:\n acu_jornadas[acu.acuj_jornada.jor_nome] = acu.acuj_content\n detalhe_icc = DetalheICC()\n detalhe_icc.setLances(lista_lances)\n detalhe_icc.setClube(self.clube)\n detalhe_icc.setAcumuladoresJornadas(acu_jornadas)\n resultados = detalhe_icc.gera()\n jogos = resultados['jogos']\n total_icc_beneficio = resultados['total_icc_beneficio']\n total_icc_prejuizo = resultados['total_icc_prejuizo']\n total_icc = total_icc_beneficio + total_icc_prejuizo\n return {'jogos': jogos, 'total_icc_beneficio': total_icc_beneficio,\n 'total_icc_prejuizo': total_icc_prejuizo, 'total_icc': total_icc}\n\n def renderHTML(self):\n html = self.render_subdir('clube', 'detalhe_clube_indices.html', {\n 'detalhe_icc_dados': self.dados, 'clube': self.clube, 'epoca':\n self.epoca, 'data': datetime.datetime.now()})\n return html\n",
"<import token>\n\n\nclass DetalheClubeIndices(DetalheClube):\n <assignment token>\n <assignment token>\n <function token>\n\n def renderDados(self):\n lista_lances = Lance.gql(\n 'WHERE lan_epoca = :1 and lan_clubes = :2 ORDER by lan_data, lan_nome'\n , self.epoca, self.clube.key()).fetch(1000)\n acu_jornadas = {}\n acumuladores = AcumuladorJornada.all().filter('acuj_epoca = ', self\n .epoca).filter('acuj_versao = ', config.VERSAO_ACUMULADOR)\n for acu in acumuladores:\n acu_jornadas[acu.acuj_jornada.jor_nome] = acu.acuj_content\n detalhe_icc = DetalheICC()\n detalhe_icc.setLances(lista_lances)\n detalhe_icc.setClube(self.clube)\n detalhe_icc.setAcumuladoresJornadas(acu_jornadas)\n resultados = detalhe_icc.gera()\n jogos = resultados['jogos']\n total_icc_beneficio = resultados['total_icc_beneficio']\n total_icc_prejuizo = resultados['total_icc_prejuizo']\n total_icc = total_icc_beneficio + total_icc_prejuizo\n return {'jogos': jogos, 'total_icc_beneficio': total_icc_beneficio,\n 'total_icc_prejuizo': total_icc_prejuizo, 'total_icc': total_icc}\n\n def renderHTML(self):\n html = self.render_subdir('clube', 'detalhe_clube_indices.html', {\n 'detalhe_icc_dados': self.dados, 'clube': self.clube, 'epoca':\n self.epoca, 'data': datetime.datetime.now()})\n return html\n",
"<import token>\n\n\nclass DetalheClubeIndices(DetalheClube):\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n def renderHTML(self):\n html = self.render_subdir('clube', 'detalhe_clube_indices.html', {\n 'detalhe_icc_dados': self.dados, 'clube': self.clube, 'epoca':\n self.epoca, 'data': datetime.datetime.now()})\n return html\n",
"<import token>\n\n\nclass DetalheClubeIndices(DetalheClube):\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
98,644 |
3d7275540d8854f4a17b6f8b050df258a110e231
|
# coding: utf8
from __future__ import unicode_literals, print_function, division
from unittest import TestCase
from mock import Mock
class Tests(TestCase):
def test_validators(self):
from pytsammalex.models import regex_validator, convert_date, valid_coordinates
with self.assertRaises(ValueError):
regex_validator('[a-z]+', None, Mock(), '')
regex_validator('[a-z]+', None, Mock(), '', allow_empty=True)
with self.assertRaises(ValueError):
convert_date('2008Jan21')
with self.assertRaises(ValueError):
valid_coordinates(None, Mock(), (91.0, 45.0))
def test_Images(self):
from pytsammalex.models import Images
row = "47a4a90a6fb76d027e7db2d383973e1d,motacillaflava,general;thumbnail1," \
"image/jpeg,,,2008-12-28,,50.567634 30.301384," \
"https://creativecommons.org/," \
"http://commons.wikimedia.org/wiki/File:Motacilla_flava_Horenka2.jpg,"\
.split(',')
self.assertEqual(Images(*row).csv_row(), row)
|
[
"# coding: utf8\nfrom __future__ import unicode_literals, print_function, division\nfrom unittest import TestCase\n\nfrom mock import Mock\n\n\nclass Tests(TestCase):\n def test_validators(self):\n from pytsammalex.models import regex_validator, convert_date, valid_coordinates\n\n with self.assertRaises(ValueError):\n regex_validator('[a-z]+', None, Mock(), '')\n regex_validator('[a-z]+', None, Mock(), '', allow_empty=True)\n\n with self.assertRaises(ValueError):\n convert_date('2008Jan21')\n\n with self.assertRaises(ValueError):\n valid_coordinates(None, Mock(), (91.0, 45.0))\n\n def test_Images(self):\n from pytsammalex.models import Images\n\n row = \"47a4a90a6fb76d027e7db2d383973e1d,motacillaflava,general;thumbnail1,\" \\\n \"image/jpeg,,,2008-12-28,,50.567634 30.301384,\" \\\n \"https://creativecommons.org/,\" \\\n \"http://commons.wikimedia.org/wiki/File:Motacilla_flava_Horenka2.jpg,\"\\\n .split(',')\n self.assertEqual(Images(*row).csv_row(), row)\n",
"from __future__ import unicode_literals, print_function, division\nfrom unittest import TestCase\nfrom mock import Mock\n\n\nclass Tests(TestCase):\n\n def test_validators(self):\n from pytsammalex.models import regex_validator, convert_date, valid_coordinates\n with self.assertRaises(ValueError):\n regex_validator('[a-z]+', None, Mock(), '')\n regex_validator('[a-z]+', None, Mock(), '', allow_empty=True)\n with self.assertRaises(ValueError):\n convert_date('2008Jan21')\n with self.assertRaises(ValueError):\n valid_coordinates(None, Mock(), (91.0, 45.0))\n\n def test_Images(self):\n from pytsammalex.models import Images\n row = (\n '47a4a90a6fb76d027e7db2d383973e1d,motacillaflava,general;thumbnail1,image/jpeg,,,2008-12-28,,50.567634 30.301384,https://creativecommons.org/,http://commons.wikimedia.org/wiki/File:Motacilla_flava_Horenka2.jpg,'\n .split(','))\n self.assertEqual(Images(*row).csv_row(), row)\n",
"<import token>\n\n\nclass Tests(TestCase):\n\n def test_validators(self):\n from pytsammalex.models import regex_validator, convert_date, valid_coordinates\n with self.assertRaises(ValueError):\n regex_validator('[a-z]+', None, Mock(), '')\n regex_validator('[a-z]+', None, Mock(), '', allow_empty=True)\n with self.assertRaises(ValueError):\n convert_date('2008Jan21')\n with self.assertRaises(ValueError):\n valid_coordinates(None, Mock(), (91.0, 45.0))\n\n def test_Images(self):\n from pytsammalex.models import Images\n row = (\n '47a4a90a6fb76d027e7db2d383973e1d,motacillaflava,general;thumbnail1,image/jpeg,,,2008-12-28,,50.567634 30.301384,https://creativecommons.org/,http://commons.wikimedia.org/wiki/File:Motacilla_flava_Horenka2.jpg,'\n .split(','))\n self.assertEqual(Images(*row).csv_row(), row)\n",
"<import token>\n\n\nclass Tests(TestCase):\n <function token>\n\n def test_Images(self):\n from pytsammalex.models import Images\n row = (\n '47a4a90a6fb76d027e7db2d383973e1d,motacillaflava,general;thumbnail1,image/jpeg,,,2008-12-28,,50.567634 30.301384,https://creativecommons.org/,http://commons.wikimedia.org/wiki/File:Motacilla_flava_Horenka2.jpg,'\n .split(','))\n self.assertEqual(Images(*row).csv_row(), row)\n",
"<import token>\n\n\nclass Tests(TestCase):\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
98,645 |
f64d5a197dac377c606088fcae37c5369c70224a
|
#12. Escreva uma função que recebe um número inteiro e retorna a mensagem “O número é múltiplo de 7” ou “O
#número não é múltiplo de 7”.
def mult(a):
if a % 7 == 0:
return "O número é múltiplo de 7"
else:
return "O número não é múltiplo de 7"
try:
a = int(input("Forneça um valor inteiro: "))
print(mult(a))
except:
print("Não foi fornecido um valor inteiro.")
|
[
"#12. Escreva\tuma\tfunção\tque\trecebe\tum\tnúmero\tinteiro\te\tretorna\ta mensagem\t“O\tnúmero\té\tmúltiplo\tde\t7”\tou\t“O\t\r\n#número\tnão\té\tmúltiplo\tde\t7”.\r\ndef mult(a):\r\n\r\n\tif a % 7 == 0:\r\n\r\n\t\treturn \"O número é múltiplo de 7\"\r\n\r\n\telse:\r\n\r\n\t\treturn \"O número não é múltiplo de 7\"\r\n\r\ntry:\r\n\r\n\ta = int(input(\"Forneça um valor inteiro: \"))\r\n\r\n\tprint(mult(a))\r\n\r\nexcept:\r\n\r\n\tprint(\"Não foi fornecido um valor inteiro.\")\r\n",
"def mult(a):\n if a % 7 == 0:\n return 'O número é múltiplo de 7'\n else:\n return 'O número não é múltiplo de 7'\n\n\ntry:\n a = int(input('Forneça um valor inteiro: '))\n print(mult(a))\nexcept:\n print('Não foi fornecido um valor inteiro.')\n",
"def mult(a):\n if a % 7 == 0:\n return 'O número é múltiplo de 7'\n else:\n return 'O número não é múltiplo de 7'\n\n\n<code token>\n",
"<function token>\n<code token>\n"
] | false |
98,646 |
5be35fce8c7f0f7429cddcbb167df3da5141317a
|
"""
@file
@brief Various functions to install some applications such as `Git <http://www.git-scm.com/>`_.
"""
from __future__ import print_function
import sys
import re
import os
from ..installhelper.install_cmd_helper import run_cmd
from .install_custom import download_page, download_file
def install_git(
temp_folder=".", fLOG=print, install=True, force_download=False, version=None):
"""
Install `Git <http://www.git-scm.com/>`_.
It does not do it a second time if it is already installed.
@param temp_folder where to download the setup
@param fLOG logging function
@param install install (otherwise only download)
@param force_download force the downloading of Git
@param version specify a version (unused)
@return temporary file
"""
if version is not None:
raise ValueError("cannot specify a version")
if sys.platform.startswith("win"):
link = "http://www.git-scm.com/download/win"
page = download_page(link)
reg = re.compile("href=\\\"(.*?64-bit[.]((msi)|(exe)))\\\"")
alls = reg.findall(page)
if len(alls) == 0:
raise AssertionError(
"unable to find a link on a .msi file on page: " + link + "\n" +
page)
url = alls[0][0]
full = url.split("/")[-1]
outfile = os.path.join(temp_folder, full)
fLOG("[pymy] download ", url)
local = download_file(url, outfile)
if install:
run_cmd("msiexec /i " + local, fLOG=fLOG, wait=True)
return local
else:
raise NotImplementedError("not available on platform " + sys.platform)
|
[
"\"\"\"\n@file\n@brief Various functions to install some applications such as `Git <http://www.git-scm.com/>`_.\n\"\"\"\nfrom __future__ import print_function\nimport sys\nimport re\nimport os\n\nfrom ..installhelper.install_cmd_helper import run_cmd\nfrom .install_custom import download_page, download_file\n\n\ndef install_git(\n temp_folder=\".\", fLOG=print, install=True, force_download=False, version=None):\n \"\"\"\n Install `Git <http://www.git-scm.com/>`_.\n It does not do it a second time if it is already installed.\n\n @param temp_folder where to download the setup\n @param fLOG logging function\n @param install install (otherwise only download)\n @param force_download force the downloading of Git\n @param version specify a version (unused)\n @return temporary file\n \"\"\"\n if version is not None:\n raise ValueError(\"cannot specify a version\")\n if sys.platform.startswith(\"win\"):\n link = \"http://www.git-scm.com/download/win\"\n page = download_page(link)\n reg = re.compile(\"href=\\\\\\\"(.*?64-bit[.]((msi)|(exe)))\\\\\\\"\")\n alls = reg.findall(page)\n if len(alls) == 0:\n raise AssertionError(\n \"unable to find a link on a .msi file on page: \" + link + \"\\n\" +\n page)\n\n url = alls[0][0]\n full = url.split(\"/\")[-1]\n outfile = os.path.join(temp_folder, full)\n fLOG(\"[pymy] download \", url)\n local = download_file(url, outfile)\n if install:\n run_cmd(\"msiexec /i \" + local, fLOG=fLOG, wait=True)\n return local\n else:\n raise NotImplementedError(\"not available on platform \" + sys.platform)\n",
"<docstring token>\nfrom __future__ import print_function\nimport sys\nimport re\nimport os\nfrom ..installhelper.install_cmd_helper import run_cmd\nfrom .install_custom import download_page, download_file\n\n\ndef install_git(temp_folder='.', fLOG=print, install=True, force_download=\n False, version=None):\n \"\"\"\n Install `Git <http://www.git-scm.com/>`_.\n It does not do it a second time if it is already installed.\n\n @param temp_folder where to download the setup\n @param fLOG logging function\n @param install install (otherwise only download)\n @param force_download force the downloading of Git\n @param version specify a version (unused)\n @return temporary file\n \"\"\"\n if version is not None:\n raise ValueError('cannot specify a version')\n if sys.platform.startswith('win'):\n link = 'http://www.git-scm.com/download/win'\n page = download_page(link)\n reg = re.compile('href=\\\\\"(.*?64-bit[.]((msi)|(exe)))\\\\\"')\n alls = reg.findall(page)\n if len(alls) == 0:\n raise AssertionError(\n 'unable to find a link on a .msi file on page: ' + link +\n '\\n' + page)\n url = alls[0][0]\n full = url.split('/')[-1]\n outfile = os.path.join(temp_folder, full)\n fLOG('[pymy] download ', url)\n local = download_file(url, outfile)\n if install:\n run_cmd('msiexec /i ' + local, fLOG=fLOG, wait=True)\n return local\n else:\n raise NotImplementedError('not available on platform ' + sys.platform)\n",
"<docstring token>\n<import token>\n\n\ndef install_git(temp_folder='.', fLOG=print, install=True, force_download=\n False, version=None):\n \"\"\"\n Install `Git <http://www.git-scm.com/>`_.\n It does not do it a second time if it is already installed.\n\n @param temp_folder where to download the setup\n @param fLOG logging function\n @param install install (otherwise only download)\n @param force_download force the downloading of Git\n @param version specify a version (unused)\n @return temporary file\n \"\"\"\n if version is not None:\n raise ValueError('cannot specify a version')\n if sys.platform.startswith('win'):\n link = 'http://www.git-scm.com/download/win'\n page = download_page(link)\n reg = re.compile('href=\\\\\"(.*?64-bit[.]((msi)|(exe)))\\\\\"')\n alls = reg.findall(page)\n if len(alls) == 0:\n raise AssertionError(\n 'unable to find a link on a .msi file on page: ' + link +\n '\\n' + page)\n url = alls[0][0]\n full = url.split('/')[-1]\n outfile = os.path.join(temp_folder, full)\n fLOG('[pymy] download ', url)\n local = download_file(url, outfile)\n if install:\n run_cmd('msiexec /i ' + local, fLOG=fLOG, wait=True)\n return local\n else:\n raise NotImplementedError('not available on platform ' + sys.platform)\n",
"<docstring token>\n<import token>\n<function token>\n"
] | false |
98,647 |
02213d34b88b15d7bfee350ec2d3b834e1c817e7
|
list1 = []
list2 = []
N = int(input())
for i in range(0, N):
num = int(input())
list1.append(num)
def count(num):
b = d = num
count = 0
while num != 0:
num //= 10
count += 1
if count > 2:
a = 0
while a <= 1:
d = d / 10
a = a + 1
d = int(d)
c = b - d * 100
list2.append(c)
else:
list2.append(b)
def digits(n):
largest = 0
smallest = 9
while (n):
r = n % 10
smallest = min(r, smallest)
largest = max(r, largest)
n = n // 10
bit = smallest * 7 + largest * 11
count(bit)
for j in list1:
digits(j)
pair = 0
count = 0
for k in range(0, len(list2) - 1):
l = 2
while k + l < len(list2):
if list2[k] == list2[k + l]:
pair += 1
elif int((list2[k]) / 10) == int((list2[k + l]) / 10):
count += 1
l += 2
if count > 2:
    pair = pair + 2
elif count == 0:
    pair = pair + 0
else:
    pair = pair + 1
print(pair)
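
To make the transformation above easier to follow, here is a condensed standalone restatement (not part of the original program) of what digits() and count() compute for a single value; the input 3947 is only an illustrative example:

def transform(n):
    smallest, largest = 9, 0
    while n:
        r = n % 10
        smallest = min(r, smallest)
        largest = max(r, largest)
        n //= 10
    bit = smallest * 7 + largest * 11
    # keep only the last two digits when bit reaches three digits, as count() does
    return bit % 100 if bit >= 100 else bit

print(transform(3947))  # smallest=3, largest=9 -> 21 + 99 = 120 -> 20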
|
[
"list1 = []\r\nlist2 = []\r\n\r\nN = int(input())\r\n\r\nfor i in range(0, N):\r\n num = int(input())\r\n list1.append(num)\r\n\r\ndef count(num):\r\n b = d = num\r\n count = 0\r\n\r\n while num != 0:\r\n num //= 10\r\n count += 1\r\n\r\n if count > 2:\r\n a = 0\r\n while a <= 1:\r\n d = d / 10\r\n a = a + 1\r\n\r\n d = int(d)\r\n\r\n c = b - d * 100\r\n list2.append(c)\r\n else:\r\n list2.append(b)\r\n\r\n\r\ndef digits(n):\r\n largest = 0\r\n smallest = 9\r\n\r\n while (n):\r\n r = n % 10\r\n\r\n smallest = min(r, smallest)\r\n largest = max(r, largest)\r\n\r\n n = n // 10\r\n\r\n bit = smallest * 7 + largest * 11\r\n count(bit)\r\n\r\nfor j in list1:\r\n digits(j)\r\n\r\npair = 0\r\ncount = 0\r\n\r\nfor k in range(0, len(list2) - 1):\r\n l = 2\r\n\r\n while k + l < len(list2):\r\n if list2[k] == list2[k + l]:\r\n pair += 1\r\n elif int((list2[k]) / 10) == int((list2[k + l]) / 10):\r\n count += 1\r\n l += 2\r\n\r\nif count > 2:\r\n pair = pair + 2\r\nelif count == 0:\r\n pair = pair + 0\r\nelse:\r\n pair = pair + 1\r\nprint(pair)\r\n\r\n\r\n\r\n",
"list1 = []\nlist2 = []\nN = int(input())\nfor i in range(0, N):\n num = int(input())\n list1.append(num)\n\n\ndef count(num):\n b = d = num\n count = 0\n while num != 0:\n num //= 10\n count += 1\n if count > 2:\n a = 0\n while a <= 1:\n d = d / 10\n a = a + 1\n d = int(d)\n c = b - d * 100\n list2.append(c)\n else:\n list2.append(b)\n\n\ndef digits(n):\n largest = 0\n smallest = 9\n while n:\n r = n % 10\n smallest = min(r, smallest)\n largest = max(r, largest)\n n = n // 10\n bit = smallest * 7 + largest * 11\n count(bit)\n\n\nfor j in list1:\n digits(j)\npair = 0\ncount = 0\nfor k in range(0, len(list2) - 1):\n l = 2\n while k + l < len(list2):\n if list2[k] == list2[k + l]:\n pair += 1\n elif int(list2[k] / 10) == int(list2[k + l] / 10):\n count += 1\n l += 2\nif count > 2:\n pair = pair + 2\nelif count == 0:\n pair = pair + 0\nelse:\n pair = pair + 1\nprint(pair)\n",
"<assignment token>\nfor i in range(0, N):\n num = int(input())\n list1.append(num)\n\n\ndef count(num):\n b = d = num\n count = 0\n while num != 0:\n num //= 10\n count += 1\n if count > 2:\n a = 0\n while a <= 1:\n d = d / 10\n a = a + 1\n d = int(d)\n c = b - d * 100\n list2.append(c)\n else:\n list2.append(b)\n\n\ndef digits(n):\n largest = 0\n smallest = 9\n while n:\n r = n % 10\n smallest = min(r, smallest)\n largest = max(r, largest)\n n = n // 10\n bit = smallest * 7 + largest * 11\n count(bit)\n\n\nfor j in list1:\n digits(j)\n<assignment token>\nfor k in range(0, len(list2) - 1):\n l = 2\n while k + l < len(list2):\n if list2[k] == list2[k + l]:\n pair += 1\n elif int(list2[k] / 10) == int(list2[k + l] / 10):\n count += 1\n l += 2\nif count > 2:\n pair = pair + 2\nelif count == 0:\n pair = pair + 0\nelse:\n pair = pair + 1\nprint(pair)\n",
"<assignment token>\n<code token>\n\n\ndef count(num):\n b = d = num\n count = 0\n while num != 0:\n num //= 10\n count += 1\n if count > 2:\n a = 0\n while a <= 1:\n d = d / 10\n a = a + 1\n d = int(d)\n c = b - d * 100\n list2.append(c)\n else:\n list2.append(b)\n\n\ndef digits(n):\n largest = 0\n smallest = 9\n while n:\n r = n % 10\n smallest = min(r, smallest)\n largest = max(r, largest)\n n = n // 10\n bit = smallest * 7 + largest * 11\n count(bit)\n\n\n<code token>\n<assignment token>\n<code token>\n",
"<assignment token>\n<code token>\n\n\ndef count(num):\n b = d = num\n count = 0\n while num != 0:\n num //= 10\n count += 1\n if count > 2:\n a = 0\n while a <= 1:\n d = d / 10\n a = a + 1\n d = int(d)\n c = b - d * 100\n list2.append(c)\n else:\n list2.append(b)\n\n\n<function token>\n<code token>\n<assignment token>\n<code token>\n",
"<assignment token>\n<code token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,648 |
b21cc3b09fa15125254c5e08fbacf0ac23489a03
|
"""
1029
"""
from typing import List
import heapq
class Solution:
# clarification
# any restrictions on time/space complexity? (e.g. timeout/memory...)
# upper/lower bound of costs[i]?
# length of costs?
    # is it possible to have multiple distributions that meet the same cost? (e.g. [[1,1],[2,2]])
# sort approach
# need to sort costs with diff of (cost_a - cost_b)
# assume everyone goes to city A, then pick n people to city B which can save most money
    # python has lambda syntax for the sort key (e.g. key=lambda x: x[1] - x[0]), using timsort (a hybrid of insertion sort and merge sort)
# time complexity: timsort avg case O(2Nlog2N), best case O(2N), worst case O(2Nlog2N)
# space complexity: O(2N) for timsort with diff array (is hidden under sort lambda syntax)
def twoCitySchedCost_sort(self, costs: List[List[int]]) -> int:
costs.sort(key=lambda x: x[1]-x[0])
N = len(costs)//2
ans = 0
for i in range(N):
ans += costs[i][1]
for j in range(N, 2*N):
ans += costs[j][0]
return ans
# heap approach
    # pick while going through the array, maintaining the heap size at N (smaller than sorting the whole range)
    # push the diff (cost_b - cost_a) onto the heap
# time complexity: O(2N*logN), for 2N elements, each operate heap insert O(logN), heappop O(logN)
# space complexity: O(N) for heap, diff is used in heap
def twoCitySchedCost(self, costs: List[List[int]]) -> int:
N = len(costs)//2
hp = []
ans = 0
for i, c in enumerate(costs):
cost_a, cost_b = c
heapq.heappush(hp, cost_b-cost_a)
ans += cost_a # assume everyone goes to city A
if len(hp) > N:
ans += heapq.heappop(hp) # popped is the best person to city B instead of A => switch to B by adding the diff
return ans
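
A quick sanity check (not part of the original snippet) using the standard example from the problem statement; both methods are expected to return 110:

if __name__ == "__main__":
    s = Solution()
    example = [[10, 20], [30, 200], [400, 50], [30, 20]]
    # twoCitySchedCost_sort sorts its argument in place, so pass a copy
    print(s.twoCitySchedCost_sort([row[:] for row in example]))  # 110
    print(s.twoCitySchedCost(example))                           # 110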
|
[
"\"\"\"\n1029\n\"\"\"\nfrom typing import List\nimport heapq\nclass Solution: \n # clarification\n # any restrictions on time/space complexity? (e.g. timeout/memory...)\n # upper/lower bound of costs[i]?\n # length of costs?\n # is is possible to have multiple distributions that meets the same cost? (e.g. [[1,1],[2,2]] )\n \n # sort approach\n # need to sort costs with diff of (cost_a - cost_b)\n # assume everyone goes to city A, then pick n people to city B which can save most money\n # python has lambda syntax, (e.g. lambda x, y = x-y), using timsort (hybrid with insertion+mergesort)\n # time complexity: timsort avg case O(2Nlog2N), best case O(2N), worst case O(2Nlog2N)\n # space complexity: O(2N) for timsort with diff array (is hidden under sort lambda syntax)\n\n def twoCitySchedCost_sort(self, costs: List[List[int]]) -> int:\n costs.sort(key=lambda x: x[1]-x[0])\n N = len(costs)//2\n ans = 0\n for i in range(N):\n ans += costs[i][1]\n for j in range(N, 2*N):\n ans += costs[j][0]\n return ans\n\n # heap approach\n # pick while go through the array, meanwhile maintain the heap size (which is smaller than the range of sorting)\n # put (diff, index) into heap\n # time complexity: O(2N*logN), for 2N elements, each operate heap insert O(logN), heappop O(logN)\n # space complexity: O(N) for heap, diff is used in heap\n\n def twoCitySchedCost(self, costs: List[List[int]]) -> int: \n N = len(costs)//2\n hp = []\n ans = 0\n for i, c in enumerate(costs):\n cost_a, cost_b = c\n heapq.heappush(hp, cost_b-cost_a)\n ans += cost_a # assume everyone goes to city A\n if len(hp) > N:\n ans += heapq.heappop(hp) # popped is the best person to city B instead of A => switch to B by adding the diff\n return ans\n",
"<docstring token>\nfrom typing import List\nimport heapq\n\n\nclass Solution:\n\n def twoCitySchedCost_sort(self, costs: List[List[int]]) ->int:\n costs.sort(key=lambda x: x[1] - x[0])\n N = len(costs) // 2\n ans = 0\n for i in range(N):\n ans += costs[i][1]\n for j in range(N, 2 * N):\n ans += costs[j][0]\n return ans\n\n def twoCitySchedCost(self, costs: List[List[int]]) ->int:\n N = len(costs) // 2\n hp = []\n ans = 0\n for i, c in enumerate(costs):\n cost_a, cost_b = c\n heapq.heappush(hp, cost_b - cost_a)\n ans += cost_a\n if len(hp) > N:\n ans += heapq.heappop(hp)\n return ans\n",
"<docstring token>\n<import token>\n\n\nclass Solution:\n\n def twoCitySchedCost_sort(self, costs: List[List[int]]) ->int:\n costs.sort(key=lambda x: x[1] - x[0])\n N = len(costs) // 2\n ans = 0\n for i in range(N):\n ans += costs[i][1]\n for j in range(N, 2 * N):\n ans += costs[j][0]\n return ans\n\n def twoCitySchedCost(self, costs: List[List[int]]) ->int:\n N = len(costs) // 2\n hp = []\n ans = 0\n for i, c in enumerate(costs):\n cost_a, cost_b = c\n heapq.heappush(hp, cost_b - cost_a)\n ans += cost_a\n if len(hp) > N:\n ans += heapq.heappop(hp)\n return ans\n",
"<docstring token>\n<import token>\n\n\nclass Solution:\n\n def twoCitySchedCost_sort(self, costs: List[List[int]]) ->int:\n costs.sort(key=lambda x: x[1] - x[0])\n N = len(costs) // 2\n ans = 0\n for i in range(N):\n ans += costs[i][1]\n for j in range(N, 2 * N):\n ans += costs[j][0]\n return ans\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass Solution:\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<class token>\n"
] | false |
98,649 |
8f9a3134a2ac3f75a06ea1e49891030783f49b27
|
import numpy as np
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
from sklearn.datasets import load_boston
from sklearn import preprocessing
from sklearn.neighbors import KNeighborsRegressor
from sklearn.model_selection import KFold
import sklearn
from sklearn.model_selection import cross_val_score
print('The scikit-learn version is {}.'.format(sklearn.__version__))
boston = load_boston()
print(boston.data)
scaler = preprocessing.StandardScaler().fit(boston.data)
X_train_transformed = scaler.transform(boston.data)
y = boston.target
print(X_train_transformed)
p_list = np.linspace(1, 10, num=200)
print(p_list)
kf = KFold(shuffle=True,random_state=42,n_splits=5)
means = []
for k in range(1, 201):
p = p_list[k-1]
kn = KNeighborsRegressor(n_neighbors=5,weights='distance',p=p,metric='minkowski')
kn.fit(X_train_transformed, y)
array = cross_val_score(estimator=kn, X=X_train_transformed, y=y, cv=kf, scoring='neg_mean_squared_error')
m = array.mean()
means.append(m)
print(means)
print(max(means))
print(np.argmax(means))
print(p_list[np.argmax(means)])
#kn = KNeighborsRegressor()
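
The manual loop above can also be written with GridSearchCV, which runs the same cross-validated sweep over p; this is a sketch reusing the variables defined above, not part of the original script. Note that load_boston has been removed from recent scikit-learn releases, so the script as written only runs on older versions.

from sklearn.model_selection import GridSearchCV

grid = GridSearchCV(
    KNeighborsRegressor(n_neighbors=5, weights='distance', metric='minkowski'),
    param_grid={'p': p_list},
    cv=kf,
    scoring='neg_mean_squared_error')
grid.fit(X_train_transformed, y)
print(grid.best_params_['p'], grid.best_score_)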
|
[
"import numpy as np\nimport pandas as pd\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.datasets import load_boston\nfrom sklearn import preprocessing\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.model_selection import KFold\nimport sklearn\nfrom sklearn.model_selection import cross_val_score\n\nprint('The scikit-learn version is {}.'.format(sklearn.__version__))\n\nboston = load_boston()\n\nprint(boston.data) \n\nscaler = preprocessing.StandardScaler().fit(boston.data)\n\nX_train_transformed = scaler.transform(boston.data)\ny = boston.target\nprint(X_train_transformed)\n\np_list = np.linspace(1, 10, num=200 )\n\nprint(p_list)\n\nkf = KFold(shuffle=True,random_state=42,n_splits=5)\nmeans = []\n\nfor k in range(1, 201):\n p = p_list[k-1]\n kn = KNeighborsRegressor(n_neighbors=5,weights='distance',p=p,metric='minkowski')\n kn.fit(X_train_transformed, y)\n array = cross_val_score(estimator=kn, X=X_train_transformed, y=y, cv=kf, scoring='neg_mean_squared_error')\n m = array.mean()\n means.append(m)\n\nprint (means)\n\nprint(max(means))\nprint(np.argmax(means))\nprint(p_list[np.argmax(means)])\n #kn = KNeighborsRegressor()",
"import numpy as np\nimport pandas as pd\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.datasets import load_boston\nfrom sklearn import preprocessing\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.model_selection import KFold\nimport sklearn\nfrom sklearn.model_selection import cross_val_score\nprint('The scikit-learn version is {}.'.format(sklearn.__version__))\nboston = load_boston()\nprint(boston.data)\nscaler = preprocessing.StandardScaler().fit(boston.data)\nX_train_transformed = scaler.transform(boston.data)\ny = boston.target\nprint(X_train_transformed)\np_list = np.linspace(1, 10, num=200)\nprint(p_list)\nkf = KFold(shuffle=True, random_state=42, n_splits=5)\nmeans = []\nfor k in range(1, 201):\n p = p_list[k - 1]\n kn = KNeighborsRegressor(n_neighbors=5, weights='distance', p=p, metric\n ='minkowski')\n kn.fit(X_train_transformed, y)\n array = cross_val_score(estimator=kn, X=X_train_transformed, y=y, cv=kf,\n scoring='neg_mean_squared_error')\n m = array.mean()\n means.append(m)\nprint(means)\nprint(max(means))\nprint(np.argmax(means))\nprint(p_list[np.argmax(means)])\n",
"<import token>\nprint('The scikit-learn version is {}.'.format(sklearn.__version__))\nboston = load_boston()\nprint(boston.data)\nscaler = preprocessing.StandardScaler().fit(boston.data)\nX_train_transformed = scaler.transform(boston.data)\ny = boston.target\nprint(X_train_transformed)\np_list = np.linspace(1, 10, num=200)\nprint(p_list)\nkf = KFold(shuffle=True, random_state=42, n_splits=5)\nmeans = []\nfor k in range(1, 201):\n p = p_list[k - 1]\n kn = KNeighborsRegressor(n_neighbors=5, weights='distance', p=p, metric\n ='minkowski')\n kn.fit(X_train_transformed, y)\n array = cross_val_score(estimator=kn, X=X_train_transformed, y=y, cv=kf,\n scoring='neg_mean_squared_error')\n m = array.mean()\n means.append(m)\nprint(means)\nprint(max(means))\nprint(np.argmax(means))\nprint(p_list[np.argmax(means)])\n",
"<import token>\nprint('The scikit-learn version is {}.'.format(sklearn.__version__))\n<assignment token>\nprint(boston.data)\n<assignment token>\nprint(X_train_transformed)\n<assignment token>\nprint(p_list)\n<assignment token>\nfor k in range(1, 201):\n p = p_list[k - 1]\n kn = KNeighborsRegressor(n_neighbors=5, weights='distance', p=p, metric\n ='minkowski')\n kn.fit(X_train_transformed, y)\n array = cross_val_score(estimator=kn, X=X_train_transformed, y=y, cv=kf,\n scoring='neg_mean_squared_error')\n m = array.mean()\n means.append(m)\nprint(means)\nprint(max(means))\nprint(np.argmax(means))\nprint(p_list[np.argmax(means)])\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,650 |
5b92e7c943c3222d62f2303746e0597da163dc62
|
import csv
from pathlib import Path
from tqdm import tqdm
class Submission:
def __init__(self):
self._data = []
def add_entry(self, test_id, bandwidth, max_user):
entry = (test_id, round(bandwidth, 2), int(max_user))
self._data.append(entry)
def write(self, directory='output', filename='submission.csv'):
output_dir = Path(__name__).parent / directory
output_dir.mkdir(parents=True, exist_ok=True)
output_file = output_dir / filename
with output_file.open('w', newline='') as csv_file:
fieldnames = ['id', 'label']
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writeheader()
for entry in tqdm(self._data):
writer.writerow({'id': entry[0], 'label': '{} {}'.format(entry[1], entry[2])})
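
Example usage of the class (illustrative ids and values only): entries are collected in memory, then write() produces output/submission.csv with one id,label row per entry, relative to the working directory.

if __name__ == '__main__':
    submission = Submission()
    submission.add_entry('test_0001', 12.3456, 4)  # stored as ('test_0001', 12.35, 4)
    submission.add_entry('test_0002', 7.0, 2)
    submission.write()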
|
[
"import csv\nfrom pathlib import Path\n\nfrom tqdm import tqdm\n\n\nclass Submission:\n\n def __init__(self):\n self._data = []\n\n def add_entry(self, test_id, bandwidth, max_user):\n entry = (test_id, round(bandwidth, 2), int(max_user))\n self._data.append(entry)\n\n def write(self, directory='output', filename='submission.csv'):\n output_dir = Path(__name__).parent / directory\n output_dir.mkdir(parents=True, exist_ok=True)\n output_file = output_dir / filename\n with output_file.open('w', newline='') as csv_file:\n fieldnames = ['id', 'label']\n writer = csv.DictWriter(csv_file, fieldnames=fieldnames)\n writer.writeheader()\n for entry in tqdm(self._data):\n writer.writerow({'id': entry[0], 'label': '{} {}'.format(entry[1], entry[2])})\n",
"import csv\nfrom pathlib import Path\nfrom tqdm import tqdm\n\n\nclass Submission:\n\n def __init__(self):\n self._data = []\n\n def add_entry(self, test_id, bandwidth, max_user):\n entry = test_id, round(bandwidth, 2), int(max_user)\n self._data.append(entry)\n\n def write(self, directory='output', filename='submission.csv'):\n output_dir = Path(__name__).parent / directory\n output_dir.mkdir(parents=True, exist_ok=True)\n output_file = output_dir / filename\n with output_file.open('w', newline='') as csv_file:\n fieldnames = ['id', 'label']\n writer = csv.DictWriter(csv_file, fieldnames=fieldnames)\n writer.writeheader()\n for entry in tqdm(self._data):\n writer.writerow({'id': entry[0], 'label': '{} {}'.format(\n entry[1], entry[2])})\n",
"<import token>\n\n\nclass Submission:\n\n def __init__(self):\n self._data = []\n\n def add_entry(self, test_id, bandwidth, max_user):\n entry = test_id, round(bandwidth, 2), int(max_user)\n self._data.append(entry)\n\n def write(self, directory='output', filename='submission.csv'):\n output_dir = Path(__name__).parent / directory\n output_dir.mkdir(parents=True, exist_ok=True)\n output_file = output_dir / filename\n with output_file.open('w', newline='') as csv_file:\n fieldnames = ['id', 'label']\n writer = csv.DictWriter(csv_file, fieldnames=fieldnames)\n writer.writeheader()\n for entry in tqdm(self._data):\n writer.writerow({'id': entry[0], 'label': '{} {}'.format(\n entry[1], entry[2])})\n",
"<import token>\n\n\nclass Submission:\n <function token>\n\n def add_entry(self, test_id, bandwidth, max_user):\n entry = test_id, round(bandwidth, 2), int(max_user)\n self._data.append(entry)\n\n def write(self, directory='output', filename='submission.csv'):\n output_dir = Path(__name__).parent / directory\n output_dir.mkdir(parents=True, exist_ok=True)\n output_file = output_dir / filename\n with output_file.open('w', newline='') as csv_file:\n fieldnames = ['id', 'label']\n writer = csv.DictWriter(csv_file, fieldnames=fieldnames)\n writer.writeheader()\n for entry in tqdm(self._data):\n writer.writerow({'id': entry[0], 'label': '{} {}'.format(\n entry[1], entry[2])})\n",
"<import token>\n\n\nclass Submission:\n <function token>\n <function token>\n\n def write(self, directory='output', filename='submission.csv'):\n output_dir = Path(__name__).parent / directory\n output_dir.mkdir(parents=True, exist_ok=True)\n output_file = output_dir / filename\n with output_file.open('w', newline='') as csv_file:\n fieldnames = ['id', 'label']\n writer = csv.DictWriter(csv_file, fieldnames=fieldnames)\n writer.writeheader()\n for entry in tqdm(self._data):\n writer.writerow({'id': entry[0], 'label': '{} {}'.format(\n entry[1], entry[2])})\n",
"<import token>\n\n\nclass Submission:\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
98,651 |
51975296b34f16a89dae7aa6be6c1dbfffdfa803
|
import math
import timeout_decorator
import jps
import json
from heapq import *
def almostEqual(x,y):
return abs(x-y) < 10**-4
class Vector(object):
def __init__(self,x=0,y=0):
self.x = x
self.y = y
def add(self,other):
return Vector(self.x + other.x, self.y + other.y)
def minus(self,other):
return Vector(self.x - other.x, self.y - other.y)
def magnitude(self):
norm = math.sqrt(self.x ** 2 + self.y ** 2)
return norm
def normalize(self):
if (self.isZero()):
return Vector()
norm = self.magnitude()
normalizedX = self.x / norm
normalizedY = self.y / norm
return Vector(normalizedX, normalizedY)
def multiply(self,factor):
return Vector(self.x * factor, self.y * factor)
def distanceSquared(self, other):
return (self.x - other.x) **2 + (self.y - other.y)**2
def __repr__(self):
return "%f,%f" % (self.x, self.y)
def __eq__(self,other):
return (isinstance(other,Vector) and
self.x == other.x and self.y == other.y)
def toTuple(self):
return (self.x,self.y)
def fromString(inputString):
split = inputString.split(",")
x = float(split[0])
y = float(split[1])
return Vector(x,y)
def fromTuple(inputTuple):
(x,y) = inputTuple
return Vector(x,y)
def roundToInteger(self):
x = int(round(self.x))
y = int(round(self.y))
return Vector(x,y)
def isZero(self):
return almostEqual(self.x,0) and almostEqual(self.y,0)
def toDict(self):
rawDict = {}
rawDict['x'] = self.x
rawDict['y'] = self.y
return rawDict
def toJson(self):
#6-7 hours on zooming, why
return json.dumps(self.toDict())
def __hash__(self):
return hash((self.x, self.y))
class AABB(object):
def __init__(self,minV,maxV):
self.minV = minV
self.maxV = maxV
def containsPoint(self,v):
if v.x < self.minV.x or v.y < self.minV.y:
return False
if v.x > self.maxV.x or v.y > self.maxV.y:
return False
return True
def collides(self, other):
if (self.maxV.x < other.minV.x
or self.maxV.y < other.minV.y
or self.minV.x > other.maxV.x
or self.minV.y > other.maxV.y):
return False
return True
def center(self):
return self.minV.add(self.maxV).multiply(.5)
    # other is the box you want to stop colliding with
def minTranslation(self, other):
mt = Vector()
left = other.minV.x - self.maxV.x
right = other.maxV.x - self.minV.x
top = other.minV.y - self.maxV.y
bottom = other.maxV.y - self.minV.y
if (left > 0 or right < 0):
return mt
if (top > 0 or bottom < 0):
return mt
if abs(left) < right:
mt.x = left
else:
mt.x = right
if abs(top) < bottom:
mt.y = top
else:
mt.y = bottom
if abs(mt.x) < abs(mt.y):
mt.y = 0
else:
mt.x = 0
return mt
def __repr__(self):
return str((self.minV,self.maxV))
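# Illustrative check of AABB.minTranslation, added for clarity and not part of
# the original module: box a overlaps box b by 2 units along x, so the minimum
# translation separating them should be (-2, 0).
def _demo_min_translation():
    a = AABB(Vector(0, 0), Vector(10, 10))
    b = AABB(Vector(8, 0), Vector(20, 10))
    print(a.minTranslation(b))  # expected output: -2.000000,0.000000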
def heuristic(p1, p2):
return abs(p2[0]-p1[0]) + abs(p2[1]-p1[1])
dirs = [(0,1),(1,1),(1,0),(1,-1),(0,-1),(-1,-1),(-1,0),(-1,1)]
# handle trying to move diagonally when the two adjacent cells are in the way
specialCheck = {(1,1):[(0,1),(1,0)],(-1,1):[(-1,0),(1,0)],
(-1,-1):[(-1,0),(0,-1)],
(1,-1):[(1,0),(0,-1)]}
def isValid(array,locTuple):
x,y = locTuple
if x < 0 or x >= len(array):
return False
if y < 0 or y >= len(array[0]):
return False
if array[x][y] == True:
return False
return True
#def findPath(mapdata,startVector,goalVector):
# naivePath = findNaivePath(mapdata,startVector,goalVector)
# if (naivePath != -1):
# return naivePath
# jpsPath = performJps(mapdata,startVector,goalVector)
# return jpsPath
def findPathAndSet(goal,currentLocation):
mapdata = goal.game.map
startVector = currentLocation
goalVector = goal.goalLocation
naivePath = findNaivePath(mapdata,startVector,goalVector)
if (naivePath != -1 and naivePath != None):
goal.path = naivePath
goal.pathI = 1
return
doJPSAsync(goal,mapdata,startVector,goalVector)
#http://code.activestate.com/recipes/576684-simple-threading-decorator/
def run_async(func):
from threading import Thread
from functools import wraps
@wraps(func)
def async_func(*args, **kwargs):
func_hl = Thread(target = func, args = args, kwargs = kwargs)
func_hl.start()
return func_hl
return async_func
@run_async
def doJPSAsync(goal,mapdata,startVector,goalVector):
jpsPath = performJps(mapdata,startVector,goalVector)
goal.path = jpsPath
if goal.path == -1:
goal.valid = False
elif goal.path != None:
goal.pathI = 1
else:
del goal.path
def findNaivePath(mapdata,startVector,goalVector):
collision = mapdata.collision.array
start = startVector.multiply(1/mapdata.collisionFactor).roundToInteger()
goal = goalVector.multiply(1/mapdata.collisionFactor).roundToInteger()
if (start == goal):
return -1
if (not isValid(collision,start.toTuple()) or not isValid(collision,goal.toTuple())):
return -1
directionVector = goal.minus(start).normalize()
current = start
while (current.distanceSquared(goal) > 1.2):
current = current.add(directionVector)
if (not isValid(collision,current.roundToInteger().toTuple())):
return -1
return [startVector,goalVector]
def performJps(mapdata,startVector, goalVector):
collision = mapdata.collision.array
jps.pad_field(collision)
start = startVector.multiply(1/mapdata.collisionFactor).roundToInteger().toTuple()
goal = goalVector.multiply(1/mapdata.collisionFactor).roundToInteger().toTuple()
(startX, startY) = start
(endX, endY) = goal
try:
rawPath = jps.jps(collision,startX,startY,endX,endY)
except:
return -1
path = []
for coord in rawPath:
path.append(Vector.fromTuple(coord).multiply(mapdata.collisionFactor))
return path
#Returns path of vectors, none if no path
#path finding based currently on pseudocode available on wikipedia
#In the future I want to implement another search that will solve the
#timeout issues
#More than 150ms is trouble
@timeout_decorator.timeout(.15, use_signals=False)
def aStarPath(mapdata, startVector, goalVector):
collision = mapdata.collision.array
start = startVector.multiply(1/mapdata.collisionFactor).roundToInteger().toTuple()
goal = goalVector.multiply(1/mapdata.collisionFactor).roundToInteger().toTuple()
if not isValid(collision,goal):
return None
closeSet = set()
prev = {}
g = {start:0}
f = {start:heuristic(start,goal)}
openHeap = []
#This is just the python version of a priority queue
heappush(openHeap,(f[start],start))
while openHeap:
cur = heappop(openHeap)[1]
if (cur == goal):
path = []
while cur in prev:
path.append(Vector.fromTuple(cur).multiply(mapdata.collisionFactor))
cur = prev[cur]
path = list(reversed(path))
return path
closeSet.add(cur)
for oX,oY in dirs:
neighbor = cur[0] + oX, cur[1] + oY
tentGscore = g[cur] + heuristic(cur,neighbor)
if (neighbor[0] < 0 or neighbor[0] >= len(collision) or
neighbor[1] < 0 or neighbor[1] >= len(collision[0])
or not isValidMove(collision,cur,neighbor,(oX,oY))):
continue
if neighbor in closeSet and tentGscore > g.get(neighbor,0):
continue
if tentGscore < g.get(neighbor,0) or neighbor not in [i[1] for i in openHeap]:
prev[neighbor] = cur
g[neighbor] = tentGscore
f[neighbor] = tentGscore + heuristic(neighbor,goal)
heappush(openHeap,(f[neighbor],neighbor))
return None
#http://stackoverflow.com/questions/12435211/python-threading-timer-repeat-function-every-n-seconds
import threading
def setInterval(interval):
def decorator(function):
def wrapper(*args, **kwargs):
stopped = threading.Event()
def loop(): # executed in another thread
while not stopped.wait(interval): # until stopped
function(*args, **kwargs)
t = threading.Thread(target=loop)
t.daemon = True # stop if the program exits
t.start()
return stopped
return wrapper
return decorator
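
Note: aStarPath above calls isValidMove, which is never defined in this snippet, so that path would raise a NameError as written. A plausible reading, given the specialCheck table, is that a diagonal step is only allowed when both adjacent orthogonal cells are free; the sketch below is an assumption, not the original author's implementation:

def isValidMove(array, cur, neighbor, offset):
    # Hypothetical helper assumed by aStarPath; not part of the original code.
    # Reject the target cell if it is blocked, and reject diagonal moves when
    # either adjacent orthogonal cell (looked up via specialCheck) is blocked.
    if not isValid(array, neighbor):
        return False
    for oX, oY in specialCheck.get(offset, []):
        if not isValid(array, (cur[0] + oX, cur[1] + oY)):
            return False
    return True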
|
[
"import math\nimport timeout_decorator\nimport jps\nimport json\nfrom heapq import *\ndef almostEqual(x,y):\n return abs(x-y) < 10**-4\n\nclass Vector(object):\n def __init__(self,x=0,y=0):\n self.x = x \n self.y = y\n def add(self,other):\n return Vector(self.x + other.x, self.y + other.y)\n def minus(self,other):\n return Vector(self.x - other.x, self.y - other.y)\n def magnitude(self):\n norm = math.sqrt(self.x ** 2 + self.y ** 2)\n return norm\n def normalize(self):\n if (self.isZero()):\n return Vector()\n norm = self.magnitude()\n normalizedX = self.x / norm \n normalizedY = self.y / norm\n return Vector(normalizedX, normalizedY)\n def multiply(self,factor):\n return Vector(self.x * factor, self.y * factor)\n def distanceSquared(self, other):\n return (self.x - other.x) **2 + (self.y - other.y)**2\n def __repr__(self):\n return \"%f,%f\" % (self.x, self.y)\n def __eq__(self,other):\n return (isinstance(other,Vector) and \n self.x == other.x and self.y == other.y)\n def toTuple(self):\n return (self.x,self.y)\n def fromString(inputString):\n split = inputString.split(\",\")\n x = float(split[0])\n y = float(split[1])\n return Vector(x,y)\n def fromTuple(inputTuple):\n (x,y) = inputTuple\n return Vector(x,y)\n def roundToInteger(self):\n x = int(round(self.x))\n y = int(round(self.y))\n return Vector(x,y)\n def isZero(self):\n return almostEqual(self.x,0) and almostEqual(self.y,0)\n def toDict(self):\n rawDict = {}\n rawDict['x'] = self.x\n rawDict['y'] = self.y\n return rawDict\n def toJson(self):\n #6-7 hours on zooming, why\n return json.dumps(self.toDict())\n def __hash__(self):\n return hash((self.x, self.y))\nclass AABB(object):\n def __init__(self,minV,maxV):\n self.minV = minV\n self.maxV = maxV\n def containsPoint(self,v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n def collides(self, other):\n if (self.maxV.x < other.minV.x\n or self.maxV.y < other.minV.y\n or self.minV.x > other.maxV.x\n or self.minV.y > other.maxV.y):\n return False\n return True\n def center(self):\n return self.minV.add(self.maxV).multiply(.5)\n #other is the thing u wanna stop colliding with\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if (left > 0 or right < 0):\n return mt\n if (top > 0 or bottom < 0):\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n def __repr__(self):\n return str((self.minV,self.maxV))\n\ndef heuristic(p1, p2):\n return abs(p2[0]-p1[0]) + abs(p2[1]-p1[1])\ndirs = [(0,1),(1,1),(1,0),(1,-1),(0,-1),(-1,-1),(-1,0),(-1,1)]\n#handle trying to diagnal when 2 in way\nspecialCheck = {(1,1):[(0,1),(1,0)],(-1,1):[(-1,0),(1,0)],\n(-1,-1):[(-1,0),(0,-1)],\n(1,-1):[(1,0),(0,-1)]}\ndef isValid(array,locTuple):\n x,y = locTuple\n if x < 0 or x >= len(array):\n return False\n if y < 0 or y >= len(array[0]):\n return False\n if array[x][y] == True:\n return False\n return True\n#def findPath(mapdata,startVector,goalVector):\n# naivePath = findNaivePath(mapdata,startVector,goalVector)\n# if (naivePath != -1):\n# return naivePath\n# jpsPath = performJps(mapdata,startVector,goalVector)\n# return jpsPath\ndef findPathAndSet(goal,currentLocation):\n mapdata = goal.game.map\n startVector = currentLocation\n 
goalVector = goal.goalLocation\n naivePath = findNaivePath(mapdata,startVector,goalVector)\n if (naivePath != -1 and naivePath != None):\n goal.path = naivePath\n goal.pathI = 1\n return\n doJPSAsync(goal,mapdata,startVector,goalVector)\n#http://code.activestate.com/recipes/576684-simple-threading-decorator/\ndef run_async(func):\n from threading import Thread\n from functools import wraps\n\n @wraps(func)\n def async_func(*args, **kwargs):\n func_hl = Thread(target = func, args = args, kwargs = kwargs)\n func_hl.start()\n return func_hl\n return async_func\n@run_async\ndef doJPSAsync(goal,mapdata,startVector,goalVector):\n jpsPath = performJps(mapdata,startVector,goalVector)\n goal.path = jpsPath\n if goal.path == -1:\n goal.valid = False\n elif goal.path != None:\n goal.pathI = 1\n else:\n del goal.path\ndef findNaivePath(mapdata,startVector,goalVector):\n collision = mapdata.collision.array\n start = startVector.multiply(1/mapdata.collisionFactor).roundToInteger()\n goal = goalVector.multiply(1/mapdata.collisionFactor).roundToInteger()\n if (start == goal):\n return -1\n if (not isValid(collision,start.toTuple()) or not isValid(collision,goal.toTuple())):\n return -1\n directionVector = goal.minus(start).normalize()\n current = start\n while (current.distanceSquared(goal) > 1.2):\n current = current.add(directionVector)\n if (not isValid(collision,current.roundToInteger().toTuple())):\n return -1\n return [startVector,goalVector]\ndef performJps(mapdata,startVector, goalVector):\n collision = mapdata.collision.array\n jps.pad_field(collision)\n start = startVector.multiply(1/mapdata.collisionFactor).roundToInteger().toTuple()\n goal = goalVector.multiply(1/mapdata.collisionFactor).roundToInteger().toTuple()\n\n (startX, startY) = start\n (endX, endY) = goal\n try:\n rawPath = jps.jps(collision,startX,startY,endX,endY)\n except:\n return -1\n path = []\n for coord in rawPath:\n path.append(Vector.fromTuple(coord).multiply(mapdata.collisionFactor))\n return path\n\n#Returns path of vectors, none if no path\n#path finding based currently on pseudocode availiable on wikipedia\n#In the future I want to implement another search that will solve the \n#timeout issues\n#More than 150ms is trouble\n@timeout_decorator.timeout(.15, use_signals=False)\ndef aStarPath(mapdata, startVector, goalVector):\n collision = mapdata.collision.array\n start = startVector.multiply(1/mapdata.collisionFactor).roundToInteger().toTuple()\n goal = goalVector.multiply(1/mapdata.collisionFactor).roundToInteger().toTuple()\n if not isValid(collision,goal):\n return None\n closeSet = set()\n prev = {}\n g = {start:0}\n f = {start:heuristic(start,goal)}\n openHeap = []\n #This is just the python version of a priority queue\n heappush(openHeap,(f[start],start))\n while openHeap:\n cur = heappop(openHeap)[1]\n if (cur == goal):\n path = []\n while cur in prev:\n path.append(Vector.fromTuple(cur).multiply(mapdata.collisionFactor))\n cur = prev[cur]\n path = list(reversed(path))\n return path\n closeSet.add(cur)\n for oX,oY in dirs:\n neighbor = cur[0] + oX, cur[1] + oY\n tentGscore = g[cur] + heuristic(cur,neighbor)\n if (neighbor[0] < 0 or neighbor[0] >= len(collision) or \n neighbor[1] < 0 or neighbor[1] >= len(collision[0])\n or not isValidMove(collision,cur,neighbor,(oX,oY))):\n continue\n if neighbor in closeSet and tentGscore > g.get(neighbor,0):\n continue\n if tentGscore < g.get(neighbor,0) or neighbor not in [i[1] for i in openHeap]:\n prev[neighbor] = cur\n g[neighbor] = tentGscore\n f[neighbor] = tentGscore + 
heuristic(neighbor,goal)\n heappush(openHeap,(f[neighbor],neighbor))\n return None\n#http://stackoverflow.com/questions/12435211/python-threading-timer-repeat-function-every-n-seconds\nimport threading\n\ndef setInterval(interval):\n def decorator(function):\n def wrapper(*args, **kwargs):\n stopped = threading.Event()\n\n def loop(): # executed in another thread\n while not stopped.wait(interval): # until stopped\n function(*args, **kwargs)\n\n t = threading.Thread(target=loop)\n t.daemon = True # stop if the program exits\n t.start()\n return stopped\n return wrapper\n return decorator",
"import math\nimport timeout_decorator\nimport jps\nimport json\nfrom heapq import *\n\n\ndef almostEqual(x, y):\n return abs(x - y) < 10 ** -4\n\n\nclass Vector(object):\n\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n\n def add(self, other):\n return Vector(self.x + other.x, self.y + other.y)\n\n def minus(self, other):\n return Vector(self.x - other.x, self.y - other.y)\n\n def magnitude(self):\n norm = math.sqrt(self.x ** 2 + self.y ** 2)\n return norm\n\n def normalize(self):\n if self.isZero():\n return Vector()\n norm = self.magnitude()\n normalizedX = self.x / norm\n normalizedY = self.y / norm\n return Vector(normalizedX, normalizedY)\n\n def multiply(self, factor):\n return Vector(self.x * factor, self.y * factor)\n\n def distanceSquared(self, other):\n return (self.x - other.x) ** 2 + (self.y - other.y) ** 2\n\n def __repr__(self):\n return '%f,%f' % (self.x, self.y)\n\n def __eq__(self, other):\n return isinstance(other, Vector\n ) and self.x == other.x and self.y == other.y\n\n def toTuple(self):\n return self.x, self.y\n\n def fromString(inputString):\n split = inputString.split(',')\n x = float(split[0])\n y = float(split[1])\n return Vector(x, y)\n\n def fromTuple(inputTuple):\n x, y = inputTuple\n return Vector(x, y)\n\n def roundToInteger(self):\n x = int(round(self.x))\n y = int(round(self.y))\n return Vector(x, y)\n\n def isZero(self):\n return almostEqual(self.x, 0) and almostEqual(self.y, 0)\n\n def toDict(self):\n rawDict = {}\n rawDict['x'] = self.x\n rawDict['y'] = self.y\n return rawDict\n\n def toJson(self):\n return json.dumps(self.toDict())\n\n def __hash__(self):\n return hash((self.x, self.y))\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n\n def containsPoint(self, v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n\n def __repr__(self):\n return str((self.minV, self.maxV))\n\n\ndef heuristic(p1, p2):\n return abs(p2[0] - p1[0]) + abs(p2[1] - p1[1])\n\n\ndirs = [(0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1)]\nspecialCheck = {(1, 1): [(0, 1), (1, 0)], (-1, 1): [(-1, 0), (1, 0)], (-1, \n -1): [(-1, 0), (0, -1)], (1, -1): [(1, 0), (0, -1)]}\n\n\ndef isValid(array, locTuple):\n x, y = locTuple\n if x < 0 or x >= len(array):\n return False\n if y < 0 or y >= len(array[0]):\n return False\n if array[x][y] == True:\n return False\n return True\n\n\ndef findPathAndSet(goal, currentLocation):\n mapdata = goal.game.map\n startVector = currentLocation\n goalVector = goal.goalLocation\n naivePath = findNaivePath(mapdata, startVector, goalVector)\n if naivePath != -1 and naivePath != None:\n goal.path = naivePath\n goal.pathI = 1\n return\n doJPSAsync(goal, mapdata, startVector, 
goalVector)\n\n\ndef run_async(func):\n from threading import Thread\n from functools import wraps\n\n @wraps(func)\n def async_func(*args, **kwargs):\n func_hl = Thread(target=func, args=args, kwargs=kwargs)\n func_hl.start()\n return func_hl\n return async_func\n\n\n@run_async\ndef doJPSAsync(goal, mapdata, startVector, goalVector):\n jpsPath = performJps(mapdata, startVector, goalVector)\n goal.path = jpsPath\n if goal.path == -1:\n goal.valid = False\n elif goal.path != None:\n goal.pathI = 1\n else:\n del goal.path\n\n\ndef findNaivePath(mapdata, startVector, goalVector):\n collision = mapdata.collision.array\n start = startVector.multiply(1 / mapdata.collisionFactor).roundToInteger()\n goal = goalVector.multiply(1 / mapdata.collisionFactor).roundToInteger()\n if start == goal:\n return -1\n if not isValid(collision, start.toTuple()) or not isValid(collision,\n goal.toTuple()):\n return -1\n directionVector = goal.minus(start).normalize()\n current = start\n while current.distanceSquared(goal) > 1.2:\n current = current.add(directionVector)\n if not isValid(collision, current.roundToInteger().toTuple()):\n return -1\n return [startVector, goalVector]\n\n\ndef performJps(mapdata, startVector, goalVector):\n collision = mapdata.collision.array\n jps.pad_field(collision)\n start = startVector.multiply(1 / mapdata.collisionFactor).roundToInteger(\n ).toTuple()\n goal = goalVector.multiply(1 / mapdata.collisionFactor).roundToInteger(\n ).toTuple()\n startX, startY = start\n endX, endY = goal\n try:\n rawPath = jps.jps(collision, startX, startY, endX, endY)\n except:\n return -1\n path = []\n for coord in rawPath:\n path.append(Vector.fromTuple(coord).multiply(mapdata.collisionFactor))\n return path\n\n\n@timeout_decorator.timeout(0.15, use_signals=False)\ndef aStarPath(mapdata, startVector, goalVector):\n collision = mapdata.collision.array\n start = startVector.multiply(1 / mapdata.collisionFactor).roundToInteger(\n ).toTuple()\n goal = goalVector.multiply(1 / mapdata.collisionFactor).roundToInteger(\n ).toTuple()\n if not isValid(collision, goal):\n return None\n closeSet = set()\n prev = {}\n g = {start: 0}\n f = {start: heuristic(start, goal)}\n openHeap = []\n heappush(openHeap, (f[start], start))\n while openHeap:\n cur = heappop(openHeap)[1]\n if cur == goal:\n path = []\n while cur in prev:\n path.append(Vector.fromTuple(cur).multiply(mapdata.\n collisionFactor))\n cur = prev[cur]\n path = list(reversed(path))\n return path\n closeSet.add(cur)\n for oX, oY in dirs:\n neighbor = cur[0] + oX, cur[1] + oY\n tentGscore = g[cur] + heuristic(cur, neighbor)\n if neighbor[0] < 0 or neighbor[0] >= len(collision) or neighbor[1\n ] < 0 or neighbor[1] >= len(collision[0]) or not isValidMove(\n collision, cur, neighbor, (oX, oY)):\n continue\n if neighbor in closeSet and tentGscore > g.get(neighbor, 0):\n continue\n if tentGscore < g.get(neighbor, 0) or neighbor not in [i[1] for\n i in openHeap]:\n prev[neighbor] = cur\n g[neighbor] = tentGscore\n f[neighbor] = tentGscore + heuristic(neighbor, goal)\n heappush(openHeap, (f[neighbor], neighbor))\n return None\n\n\nimport threading\n\n\ndef setInterval(interval):\n\n def decorator(function):\n\n def wrapper(*args, **kwargs):\n stopped = threading.Event()\n\n def loop():\n while not stopped.wait(interval):\n function(*args, **kwargs)\n t = threading.Thread(target=loop)\n t.daemon = True\n t.start()\n return stopped\n return wrapper\n return decorator\n",
"<import token>\n\n\ndef almostEqual(x, y):\n return abs(x - y) < 10 ** -4\n\n\nclass Vector(object):\n\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n\n def add(self, other):\n return Vector(self.x + other.x, self.y + other.y)\n\n def minus(self, other):\n return Vector(self.x - other.x, self.y - other.y)\n\n def magnitude(self):\n norm = math.sqrt(self.x ** 2 + self.y ** 2)\n return norm\n\n def normalize(self):\n if self.isZero():\n return Vector()\n norm = self.magnitude()\n normalizedX = self.x / norm\n normalizedY = self.y / norm\n return Vector(normalizedX, normalizedY)\n\n def multiply(self, factor):\n return Vector(self.x * factor, self.y * factor)\n\n def distanceSquared(self, other):\n return (self.x - other.x) ** 2 + (self.y - other.y) ** 2\n\n def __repr__(self):\n return '%f,%f' % (self.x, self.y)\n\n def __eq__(self, other):\n return isinstance(other, Vector\n ) and self.x == other.x and self.y == other.y\n\n def toTuple(self):\n return self.x, self.y\n\n def fromString(inputString):\n split = inputString.split(',')\n x = float(split[0])\n y = float(split[1])\n return Vector(x, y)\n\n def fromTuple(inputTuple):\n x, y = inputTuple\n return Vector(x, y)\n\n def roundToInteger(self):\n x = int(round(self.x))\n y = int(round(self.y))\n return Vector(x, y)\n\n def isZero(self):\n return almostEqual(self.x, 0) and almostEqual(self.y, 0)\n\n def toDict(self):\n rawDict = {}\n rawDict['x'] = self.x\n rawDict['y'] = self.y\n return rawDict\n\n def toJson(self):\n return json.dumps(self.toDict())\n\n def __hash__(self):\n return hash((self.x, self.y))\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n\n def containsPoint(self, v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n\n def __repr__(self):\n return str((self.minV, self.maxV))\n\n\ndef heuristic(p1, p2):\n return abs(p2[0] - p1[0]) + abs(p2[1] - p1[1])\n\n\ndirs = [(0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1)]\nspecialCheck = {(1, 1): [(0, 1), (1, 0)], (-1, 1): [(-1, 0), (1, 0)], (-1, \n -1): [(-1, 0), (0, -1)], (1, -1): [(1, 0), (0, -1)]}\n\n\ndef isValid(array, locTuple):\n x, y = locTuple\n if x < 0 or x >= len(array):\n return False\n if y < 0 or y >= len(array[0]):\n return False\n if array[x][y] == True:\n return False\n return True\n\n\ndef findPathAndSet(goal, currentLocation):\n mapdata = goal.game.map\n startVector = currentLocation\n goalVector = goal.goalLocation\n naivePath = findNaivePath(mapdata, startVector, goalVector)\n if naivePath != -1 and naivePath != None:\n goal.path = naivePath\n goal.pathI = 1\n return\n doJPSAsync(goal, mapdata, startVector, goalVector)\n\n\ndef run_async(func):\n from threading import Thread\n 
from functools import wraps\n\n @wraps(func)\n def async_func(*args, **kwargs):\n func_hl = Thread(target=func, args=args, kwargs=kwargs)\n func_hl.start()\n return func_hl\n return async_func\n\n\n@run_async\ndef doJPSAsync(goal, mapdata, startVector, goalVector):\n jpsPath = performJps(mapdata, startVector, goalVector)\n goal.path = jpsPath\n if goal.path == -1:\n goal.valid = False\n elif goal.path != None:\n goal.pathI = 1\n else:\n del goal.path\n\n\ndef findNaivePath(mapdata, startVector, goalVector):\n collision = mapdata.collision.array\n start = startVector.multiply(1 / mapdata.collisionFactor).roundToInteger()\n goal = goalVector.multiply(1 / mapdata.collisionFactor).roundToInteger()\n if start == goal:\n return -1\n if not isValid(collision, start.toTuple()) or not isValid(collision,\n goal.toTuple()):\n return -1\n directionVector = goal.minus(start).normalize()\n current = start\n while current.distanceSquared(goal) > 1.2:\n current = current.add(directionVector)\n if not isValid(collision, current.roundToInteger().toTuple()):\n return -1\n return [startVector, goalVector]\n\n\ndef performJps(mapdata, startVector, goalVector):\n collision = mapdata.collision.array\n jps.pad_field(collision)\n start = startVector.multiply(1 / mapdata.collisionFactor).roundToInteger(\n ).toTuple()\n goal = goalVector.multiply(1 / mapdata.collisionFactor).roundToInteger(\n ).toTuple()\n startX, startY = start\n endX, endY = goal\n try:\n rawPath = jps.jps(collision, startX, startY, endX, endY)\n except:\n return -1\n path = []\n for coord in rawPath:\n path.append(Vector.fromTuple(coord).multiply(mapdata.collisionFactor))\n return path\n\n\n@timeout_decorator.timeout(0.15, use_signals=False)\ndef aStarPath(mapdata, startVector, goalVector):\n collision = mapdata.collision.array\n start = startVector.multiply(1 / mapdata.collisionFactor).roundToInteger(\n ).toTuple()\n goal = goalVector.multiply(1 / mapdata.collisionFactor).roundToInteger(\n ).toTuple()\n if not isValid(collision, goal):\n return None\n closeSet = set()\n prev = {}\n g = {start: 0}\n f = {start: heuristic(start, goal)}\n openHeap = []\n heappush(openHeap, (f[start], start))\n while openHeap:\n cur = heappop(openHeap)[1]\n if cur == goal:\n path = []\n while cur in prev:\n path.append(Vector.fromTuple(cur).multiply(mapdata.\n collisionFactor))\n cur = prev[cur]\n path = list(reversed(path))\n return path\n closeSet.add(cur)\n for oX, oY in dirs:\n neighbor = cur[0] + oX, cur[1] + oY\n tentGscore = g[cur] + heuristic(cur, neighbor)\n if neighbor[0] < 0 or neighbor[0] >= len(collision) or neighbor[1\n ] < 0 or neighbor[1] >= len(collision[0]) or not isValidMove(\n collision, cur, neighbor, (oX, oY)):\n continue\n if neighbor in closeSet and tentGscore > g.get(neighbor, 0):\n continue\n if tentGscore < g.get(neighbor, 0) or neighbor not in [i[1] for\n i in openHeap]:\n prev[neighbor] = cur\n g[neighbor] = tentGscore\n f[neighbor] = tentGscore + heuristic(neighbor, goal)\n heappush(openHeap, (f[neighbor], neighbor))\n return None\n\n\n<import token>\n\n\ndef setInterval(interval):\n\n def decorator(function):\n\n def wrapper(*args, **kwargs):\n stopped = threading.Event()\n\n def loop():\n while not stopped.wait(interval):\n function(*args, **kwargs)\n t = threading.Thread(target=loop)\n t.daemon = True\n t.start()\n return stopped\n return wrapper\n return decorator\n",
"<import token>\n\n\ndef almostEqual(x, y):\n return abs(x - y) < 10 ** -4\n\n\nclass Vector(object):\n\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n\n def add(self, other):\n return Vector(self.x + other.x, self.y + other.y)\n\n def minus(self, other):\n return Vector(self.x - other.x, self.y - other.y)\n\n def magnitude(self):\n norm = math.sqrt(self.x ** 2 + self.y ** 2)\n return norm\n\n def normalize(self):\n if self.isZero():\n return Vector()\n norm = self.magnitude()\n normalizedX = self.x / norm\n normalizedY = self.y / norm\n return Vector(normalizedX, normalizedY)\n\n def multiply(self, factor):\n return Vector(self.x * factor, self.y * factor)\n\n def distanceSquared(self, other):\n return (self.x - other.x) ** 2 + (self.y - other.y) ** 2\n\n def __repr__(self):\n return '%f,%f' % (self.x, self.y)\n\n def __eq__(self, other):\n return isinstance(other, Vector\n ) and self.x == other.x and self.y == other.y\n\n def toTuple(self):\n return self.x, self.y\n\n def fromString(inputString):\n split = inputString.split(',')\n x = float(split[0])\n y = float(split[1])\n return Vector(x, y)\n\n def fromTuple(inputTuple):\n x, y = inputTuple\n return Vector(x, y)\n\n def roundToInteger(self):\n x = int(round(self.x))\n y = int(round(self.y))\n return Vector(x, y)\n\n def isZero(self):\n return almostEqual(self.x, 0) and almostEqual(self.y, 0)\n\n def toDict(self):\n rawDict = {}\n rawDict['x'] = self.x\n rawDict['y'] = self.y\n return rawDict\n\n def toJson(self):\n return json.dumps(self.toDict())\n\n def __hash__(self):\n return hash((self.x, self.y))\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n\n def containsPoint(self, v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n\n def __repr__(self):\n return str((self.minV, self.maxV))\n\n\ndef heuristic(p1, p2):\n return abs(p2[0] - p1[0]) + abs(p2[1] - p1[1])\n\n\n<assignment token>\n\n\ndef isValid(array, locTuple):\n x, y = locTuple\n if x < 0 or x >= len(array):\n return False\n if y < 0 or y >= len(array[0]):\n return False\n if array[x][y] == True:\n return False\n return True\n\n\ndef findPathAndSet(goal, currentLocation):\n mapdata = goal.game.map\n startVector = currentLocation\n goalVector = goal.goalLocation\n naivePath = findNaivePath(mapdata, startVector, goalVector)\n if naivePath != -1 and naivePath != None:\n goal.path = naivePath\n goal.pathI = 1\n return\n doJPSAsync(goal, mapdata, startVector, goalVector)\n\n\ndef run_async(func):\n from threading import Thread\n from functools import wraps\n\n @wraps(func)\n def async_func(*args, **kwargs):\n func_hl = Thread(target=func, args=args, kwargs=kwargs)\n func_hl.start()\n return func_hl\n return 
async_func\n\n\n@run_async\ndef doJPSAsync(goal, mapdata, startVector, goalVector):\n jpsPath = performJps(mapdata, startVector, goalVector)\n goal.path = jpsPath\n if goal.path == -1:\n goal.valid = False\n elif goal.path != None:\n goal.pathI = 1\n else:\n del goal.path\n\n\ndef findNaivePath(mapdata, startVector, goalVector):\n collision = mapdata.collision.array\n start = startVector.multiply(1 / mapdata.collisionFactor).roundToInteger()\n goal = goalVector.multiply(1 / mapdata.collisionFactor).roundToInteger()\n if start == goal:\n return -1\n if not isValid(collision, start.toTuple()) or not isValid(collision,\n goal.toTuple()):\n return -1\n directionVector = goal.minus(start).normalize()\n current = start\n while current.distanceSquared(goal) > 1.2:\n current = current.add(directionVector)\n if not isValid(collision, current.roundToInteger().toTuple()):\n return -1\n return [startVector, goalVector]\n\n\ndef performJps(mapdata, startVector, goalVector):\n collision = mapdata.collision.array\n jps.pad_field(collision)\n start = startVector.multiply(1 / mapdata.collisionFactor).roundToInteger(\n ).toTuple()\n goal = goalVector.multiply(1 / mapdata.collisionFactor).roundToInteger(\n ).toTuple()\n startX, startY = start\n endX, endY = goal\n try:\n rawPath = jps.jps(collision, startX, startY, endX, endY)\n except:\n return -1\n path = []\n for coord in rawPath:\n path.append(Vector.fromTuple(coord).multiply(mapdata.collisionFactor))\n return path\n\n\n@timeout_decorator.timeout(0.15, use_signals=False)\ndef aStarPath(mapdata, startVector, goalVector):\n collision = mapdata.collision.array\n start = startVector.multiply(1 / mapdata.collisionFactor).roundToInteger(\n ).toTuple()\n goal = goalVector.multiply(1 / mapdata.collisionFactor).roundToInteger(\n ).toTuple()\n if not isValid(collision, goal):\n return None\n closeSet = set()\n prev = {}\n g = {start: 0}\n f = {start: heuristic(start, goal)}\n openHeap = []\n heappush(openHeap, (f[start], start))\n while openHeap:\n cur = heappop(openHeap)[1]\n if cur == goal:\n path = []\n while cur in prev:\n path.append(Vector.fromTuple(cur).multiply(mapdata.\n collisionFactor))\n cur = prev[cur]\n path = list(reversed(path))\n return path\n closeSet.add(cur)\n for oX, oY in dirs:\n neighbor = cur[0] + oX, cur[1] + oY\n tentGscore = g[cur] + heuristic(cur, neighbor)\n if neighbor[0] < 0 or neighbor[0] >= len(collision) or neighbor[1\n ] < 0 or neighbor[1] >= len(collision[0]) or not isValidMove(\n collision, cur, neighbor, (oX, oY)):\n continue\n if neighbor in closeSet and tentGscore > g.get(neighbor, 0):\n continue\n if tentGscore < g.get(neighbor, 0) or neighbor not in [i[1] for\n i in openHeap]:\n prev[neighbor] = cur\n g[neighbor] = tentGscore\n f[neighbor] = tentGscore + heuristic(neighbor, goal)\n heappush(openHeap, (f[neighbor], neighbor))\n return None\n\n\n<import token>\n\n\ndef setInterval(interval):\n\n def decorator(function):\n\n def wrapper(*args, **kwargs):\n stopped = threading.Event()\n\n def loop():\n while not stopped.wait(interval):\n function(*args, **kwargs)\n t = threading.Thread(target=loop)\n t.daemon = True\n t.start()\n return stopped\n return wrapper\n return decorator\n",
"<import token>\n\n\ndef almostEqual(x, y):\n return abs(x - y) < 10 ** -4\n\n\nclass Vector(object):\n\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n\n def add(self, other):\n return Vector(self.x + other.x, self.y + other.y)\n\n def minus(self, other):\n return Vector(self.x - other.x, self.y - other.y)\n\n def magnitude(self):\n norm = math.sqrt(self.x ** 2 + self.y ** 2)\n return norm\n\n def normalize(self):\n if self.isZero():\n return Vector()\n norm = self.magnitude()\n normalizedX = self.x / norm\n normalizedY = self.y / norm\n return Vector(normalizedX, normalizedY)\n\n def multiply(self, factor):\n return Vector(self.x * factor, self.y * factor)\n\n def distanceSquared(self, other):\n return (self.x - other.x) ** 2 + (self.y - other.y) ** 2\n\n def __repr__(self):\n return '%f,%f' % (self.x, self.y)\n\n def __eq__(self, other):\n return isinstance(other, Vector\n ) and self.x == other.x and self.y == other.y\n\n def toTuple(self):\n return self.x, self.y\n\n def fromString(inputString):\n split = inputString.split(',')\n x = float(split[0])\n y = float(split[1])\n return Vector(x, y)\n\n def fromTuple(inputTuple):\n x, y = inputTuple\n return Vector(x, y)\n\n def roundToInteger(self):\n x = int(round(self.x))\n y = int(round(self.y))\n return Vector(x, y)\n\n def isZero(self):\n return almostEqual(self.x, 0) and almostEqual(self.y, 0)\n\n def toDict(self):\n rawDict = {}\n rawDict['x'] = self.x\n rawDict['y'] = self.y\n return rawDict\n\n def toJson(self):\n return json.dumps(self.toDict())\n\n def __hash__(self):\n return hash((self.x, self.y))\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n\n def containsPoint(self, v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n\n def __repr__(self):\n return str((self.minV, self.maxV))\n\n\ndef heuristic(p1, p2):\n return abs(p2[0] - p1[0]) + abs(p2[1] - p1[1])\n\n\n<assignment token>\n\n\ndef isValid(array, locTuple):\n x, y = locTuple\n if x < 0 or x >= len(array):\n return False\n if y < 0 or y >= len(array[0]):\n return False\n if array[x][y] == True:\n return False\n return True\n\n\ndef findPathAndSet(goal, currentLocation):\n mapdata = goal.game.map\n startVector = currentLocation\n goalVector = goal.goalLocation\n naivePath = findNaivePath(mapdata, startVector, goalVector)\n if naivePath != -1 and naivePath != None:\n goal.path = naivePath\n goal.pathI = 1\n return\n doJPSAsync(goal, mapdata, startVector, goalVector)\n\n\ndef run_async(func):\n from threading import Thread\n from functools import wraps\n\n @wraps(func)\n def async_func(*args, **kwargs):\n func_hl = Thread(target=func, args=args, kwargs=kwargs)\n func_hl.start()\n return func_hl\n return 
async_func\n\n\n<function token>\n\n\ndef findNaivePath(mapdata, startVector, goalVector):\n collision = mapdata.collision.array\n start = startVector.multiply(1 / mapdata.collisionFactor).roundToInteger()\n goal = goalVector.multiply(1 / mapdata.collisionFactor).roundToInteger()\n if start == goal:\n return -1\n if not isValid(collision, start.toTuple()) or not isValid(collision,\n goal.toTuple()):\n return -1\n directionVector = goal.minus(start).normalize()\n current = start\n while current.distanceSquared(goal) > 1.2:\n current = current.add(directionVector)\n if not isValid(collision, current.roundToInteger().toTuple()):\n return -1\n return [startVector, goalVector]\n\n\ndef performJps(mapdata, startVector, goalVector):\n collision = mapdata.collision.array\n jps.pad_field(collision)\n start = startVector.multiply(1 / mapdata.collisionFactor).roundToInteger(\n ).toTuple()\n goal = goalVector.multiply(1 / mapdata.collisionFactor).roundToInteger(\n ).toTuple()\n startX, startY = start\n endX, endY = goal\n try:\n rawPath = jps.jps(collision, startX, startY, endX, endY)\n except:\n return -1\n path = []\n for coord in rawPath:\n path.append(Vector.fromTuple(coord).multiply(mapdata.collisionFactor))\n return path\n\n\n@timeout_decorator.timeout(0.15, use_signals=False)\ndef aStarPath(mapdata, startVector, goalVector):\n collision = mapdata.collision.array\n start = startVector.multiply(1 / mapdata.collisionFactor).roundToInteger(\n ).toTuple()\n goal = goalVector.multiply(1 / mapdata.collisionFactor).roundToInteger(\n ).toTuple()\n if not isValid(collision, goal):\n return None\n closeSet = set()\n prev = {}\n g = {start: 0}\n f = {start: heuristic(start, goal)}\n openHeap = []\n heappush(openHeap, (f[start], start))\n while openHeap:\n cur = heappop(openHeap)[1]\n if cur == goal:\n path = []\n while cur in prev:\n path.append(Vector.fromTuple(cur).multiply(mapdata.\n collisionFactor))\n cur = prev[cur]\n path = list(reversed(path))\n return path\n closeSet.add(cur)\n for oX, oY in dirs:\n neighbor = cur[0] + oX, cur[1] + oY\n tentGscore = g[cur] + heuristic(cur, neighbor)\n if neighbor[0] < 0 or neighbor[0] >= len(collision) or neighbor[1\n ] < 0 or neighbor[1] >= len(collision[0]) or not isValidMove(\n collision, cur, neighbor, (oX, oY)):\n continue\n if neighbor in closeSet and tentGscore > g.get(neighbor, 0):\n continue\n if tentGscore < g.get(neighbor, 0) or neighbor not in [i[1] for\n i in openHeap]:\n prev[neighbor] = cur\n g[neighbor] = tentGscore\n f[neighbor] = tentGscore + heuristic(neighbor, goal)\n heappush(openHeap, (f[neighbor], neighbor))\n return None\n\n\n<import token>\n\n\ndef setInterval(interval):\n\n def decorator(function):\n\n def wrapper(*args, **kwargs):\n stopped = threading.Event()\n\n def loop():\n while not stopped.wait(interval):\n function(*args, **kwargs)\n t = threading.Thread(target=loop)\n t.daemon = True\n t.start()\n return stopped\n return wrapper\n return decorator\n",
"<import token>\n\n\ndef almostEqual(x, y):\n return abs(x - y) < 10 ** -4\n\n\nclass Vector(object):\n\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n\n def add(self, other):\n return Vector(self.x + other.x, self.y + other.y)\n\n def minus(self, other):\n return Vector(self.x - other.x, self.y - other.y)\n\n def magnitude(self):\n norm = math.sqrt(self.x ** 2 + self.y ** 2)\n return norm\n\n def normalize(self):\n if self.isZero():\n return Vector()\n norm = self.magnitude()\n normalizedX = self.x / norm\n normalizedY = self.y / norm\n return Vector(normalizedX, normalizedY)\n\n def multiply(self, factor):\n return Vector(self.x * factor, self.y * factor)\n\n def distanceSquared(self, other):\n return (self.x - other.x) ** 2 + (self.y - other.y) ** 2\n\n def __repr__(self):\n return '%f,%f' % (self.x, self.y)\n\n def __eq__(self, other):\n return isinstance(other, Vector\n ) and self.x == other.x and self.y == other.y\n\n def toTuple(self):\n return self.x, self.y\n\n def fromString(inputString):\n split = inputString.split(',')\n x = float(split[0])\n y = float(split[1])\n return Vector(x, y)\n\n def fromTuple(inputTuple):\n x, y = inputTuple\n return Vector(x, y)\n\n def roundToInteger(self):\n x = int(round(self.x))\n y = int(round(self.y))\n return Vector(x, y)\n\n def isZero(self):\n return almostEqual(self.x, 0) and almostEqual(self.y, 0)\n\n def toDict(self):\n rawDict = {}\n rawDict['x'] = self.x\n rawDict['y'] = self.y\n return rawDict\n\n def toJson(self):\n return json.dumps(self.toDict())\n\n def __hash__(self):\n return hash((self.x, self.y))\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n\n def containsPoint(self, v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n\n def __repr__(self):\n return str((self.minV, self.maxV))\n\n\n<function token>\n<assignment token>\n\n\ndef isValid(array, locTuple):\n x, y = locTuple\n if x < 0 or x >= len(array):\n return False\n if y < 0 or y >= len(array[0]):\n return False\n if array[x][y] == True:\n return False\n return True\n\n\ndef findPathAndSet(goal, currentLocation):\n mapdata = goal.game.map\n startVector = currentLocation\n goalVector = goal.goalLocation\n naivePath = findNaivePath(mapdata, startVector, goalVector)\n if naivePath != -1 and naivePath != None:\n goal.path = naivePath\n goal.pathI = 1\n return\n doJPSAsync(goal, mapdata, startVector, goalVector)\n\n\ndef run_async(func):\n from threading import Thread\n from functools import wraps\n\n @wraps(func)\n def async_func(*args, **kwargs):\n func_hl = Thread(target=func, args=args, kwargs=kwargs)\n func_hl.start()\n return func_hl\n return async_func\n\n\n<function token>\n\n\ndef findNaivePath(mapdata, 
startVector, goalVector):\n collision = mapdata.collision.array\n start = startVector.multiply(1 / mapdata.collisionFactor).roundToInteger()\n goal = goalVector.multiply(1 / mapdata.collisionFactor).roundToInteger()\n if start == goal:\n return -1\n if not isValid(collision, start.toTuple()) or not isValid(collision,\n goal.toTuple()):\n return -1\n directionVector = goal.minus(start).normalize()\n current = start\n while current.distanceSquared(goal) > 1.2:\n current = current.add(directionVector)\n if not isValid(collision, current.roundToInteger().toTuple()):\n return -1\n return [startVector, goalVector]\n\n\ndef performJps(mapdata, startVector, goalVector):\n collision = mapdata.collision.array\n jps.pad_field(collision)\n start = startVector.multiply(1 / mapdata.collisionFactor).roundToInteger(\n ).toTuple()\n goal = goalVector.multiply(1 / mapdata.collisionFactor).roundToInteger(\n ).toTuple()\n startX, startY = start\n endX, endY = goal\n try:\n rawPath = jps.jps(collision, startX, startY, endX, endY)\n except:\n return -1\n path = []\n for coord in rawPath:\n path.append(Vector.fromTuple(coord).multiply(mapdata.collisionFactor))\n return path\n\n\n@timeout_decorator.timeout(0.15, use_signals=False)\ndef aStarPath(mapdata, startVector, goalVector):\n collision = mapdata.collision.array\n start = startVector.multiply(1 / mapdata.collisionFactor).roundToInteger(\n ).toTuple()\n goal = goalVector.multiply(1 / mapdata.collisionFactor).roundToInteger(\n ).toTuple()\n if not isValid(collision, goal):\n return None\n closeSet = set()\n prev = {}\n g = {start: 0}\n f = {start: heuristic(start, goal)}\n openHeap = []\n heappush(openHeap, (f[start], start))\n while openHeap:\n cur = heappop(openHeap)[1]\n if cur == goal:\n path = []\n while cur in prev:\n path.append(Vector.fromTuple(cur).multiply(mapdata.\n collisionFactor))\n cur = prev[cur]\n path = list(reversed(path))\n return path\n closeSet.add(cur)\n for oX, oY in dirs:\n neighbor = cur[0] + oX, cur[1] + oY\n tentGscore = g[cur] + heuristic(cur, neighbor)\n if neighbor[0] < 0 or neighbor[0] >= len(collision) or neighbor[1\n ] < 0 or neighbor[1] >= len(collision[0]) or not isValidMove(\n collision, cur, neighbor, (oX, oY)):\n continue\n if neighbor in closeSet and tentGscore > g.get(neighbor, 0):\n continue\n if tentGscore < g.get(neighbor, 0) or neighbor not in [i[1] for\n i in openHeap]:\n prev[neighbor] = cur\n g[neighbor] = tentGscore\n f[neighbor] = tentGscore + heuristic(neighbor, goal)\n heappush(openHeap, (f[neighbor], neighbor))\n return None\n\n\n<import token>\n\n\ndef setInterval(interval):\n\n def decorator(function):\n\n def wrapper(*args, **kwargs):\n stopped = threading.Event()\n\n def loop():\n while not stopped.wait(interval):\n function(*args, **kwargs)\n t = threading.Thread(target=loop)\n t.daemon = True\n t.start()\n return stopped\n return wrapper\n return decorator\n",
"<import token>\n\n\ndef almostEqual(x, y):\n return abs(x - y) < 10 ** -4\n\n\nclass Vector(object):\n\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n\n def add(self, other):\n return Vector(self.x + other.x, self.y + other.y)\n\n def minus(self, other):\n return Vector(self.x - other.x, self.y - other.y)\n\n def magnitude(self):\n norm = math.sqrt(self.x ** 2 + self.y ** 2)\n return norm\n\n def normalize(self):\n if self.isZero():\n return Vector()\n norm = self.magnitude()\n normalizedX = self.x / norm\n normalizedY = self.y / norm\n return Vector(normalizedX, normalizedY)\n\n def multiply(self, factor):\n return Vector(self.x * factor, self.y * factor)\n\n def distanceSquared(self, other):\n return (self.x - other.x) ** 2 + (self.y - other.y) ** 2\n\n def __repr__(self):\n return '%f,%f' % (self.x, self.y)\n\n def __eq__(self, other):\n return isinstance(other, Vector\n ) and self.x == other.x and self.y == other.y\n\n def toTuple(self):\n return self.x, self.y\n\n def fromString(inputString):\n split = inputString.split(',')\n x = float(split[0])\n y = float(split[1])\n return Vector(x, y)\n\n def fromTuple(inputTuple):\n x, y = inputTuple\n return Vector(x, y)\n\n def roundToInteger(self):\n x = int(round(self.x))\n y = int(round(self.y))\n return Vector(x, y)\n\n def isZero(self):\n return almostEqual(self.x, 0) and almostEqual(self.y, 0)\n\n def toDict(self):\n rawDict = {}\n rawDict['x'] = self.x\n rawDict['y'] = self.y\n return rawDict\n\n def toJson(self):\n return json.dumps(self.toDict())\n\n def __hash__(self):\n return hash((self.x, self.y))\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n\n def containsPoint(self, v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n\n def __repr__(self):\n return str((self.minV, self.maxV))\n\n\n<function token>\n<assignment token>\n\n\ndef isValid(array, locTuple):\n x, y = locTuple\n if x < 0 or x >= len(array):\n return False\n if y < 0 or y >= len(array[0]):\n return False\n if array[x][y] == True:\n return False\n return True\n\n\ndef findPathAndSet(goal, currentLocation):\n mapdata = goal.game.map\n startVector = currentLocation\n goalVector = goal.goalLocation\n naivePath = findNaivePath(mapdata, startVector, goalVector)\n if naivePath != -1 and naivePath != None:\n goal.path = naivePath\n goal.pathI = 1\n return\n doJPSAsync(goal, mapdata, startVector, goalVector)\n\n\ndef run_async(func):\n from threading import Thread\n from functools import wraps\n\n @wraps(func)\n def async_func(*args, **kwargs):\n func_hl = Thread(target=func, args=args, kwargs=kwargs)\n func_hl.start()\n return func_hl\n return async_func\n\n\n<function token>\n\n\ndef findNaivePath(mapdata, 
startVector, goalVector):\n collision = mapdata.collision.array\n start = startVector.multiply(1 / mapdata.collisionFactor).roundToInteger()\n goal = goalVector.multiply(1 / mapdata.collisionFactor).roundToInteger()\n if start == goal:\n return -1\n if not isValid(collision, start.toTuple()) or not isValid(collision,\n goal.toTuple()):\n return -1\n directionVector = goal.minus(start).normalize()\n current = start\n while current.distanceSquared(goal) > 1.2:\n current = current.add(directionVector)\n if not isValid(collision, current.roundToInteger().toTuple()):\n return -1\n return [startVector, goalVector]\n\n\ndef performJps(mapdata, startVector, goalVector):\n collision = mapdata.collision.array\n jps.pad_field(collision)\n start = startVector.multiply(1 / mapdata.collisionFactor).roundToInteger(\n ).toTuple()\n goal = goalVector.multiply(1 / mapdata.collisionFactor).roundToInteger(\n ).toTuple()\n startX, startY = start\n endX, endY = goal\n try:\n rawPath = jps.jps(collision, startX, startY, endX, endY)\n except:\n return -1\n path = []\n for coord in rawPath:\n path.append(Vector.fromTuple(coord).multiply(mapdata.collisionFactor))\n return path\n\n\n@timeout_decorator.timeout(0.15, use_signals=False)\ndef aStarPath(mapdata, startVector, goalVector):\n collision = mapdata.collision.array\n start = startVector.multiply(1 / mapdata.collisionFactor).roundToInteger(\n ).toTuple()\n goal = goalVector.multiply(1 / mapdata.collisionFactor).roundToInteger(\n ).toTuple()\n if not isValid(collision, goal):\n return None\n closeSet = set()\n prev = {}\n g = {start: 0}\n f = {start: heuristic(start, goal)}\n openHeap = []\n heappush(openHeap, (f[start], start))\n while openHeap:\n cur = heappop(openHeap)[1]\n if cur == goal:\n path = []\n while cur in prev:\n path.append(Vector.fromTuple(cur).multiply(mapdata.\n collisionFactor))\n cur = prev[cur]\n path = list(reversed(path))\n return path\n closeSet.add(cur)\n for oX, oY in dirs:\n neighbor = cur[0] + oX, cur[1] + oY\n tentGscore = g[cur] + heuristic(cur, neighbor)\n if neighbor[0] < 0 or neighbor[0] >= len(collision) or neighbor[1\n ] < 0 or neighbor[1] >= len(collision[0]) or not isValidMove(\n collision, cur, neighbor, (oX, oY)):\n continue\n if neighbor in closeSet and tentGscore > g.get(neighbor, 0):\n continue\n if tentGscore < g.get(neighbor, 0) or neighbor not in [i[1] for\n i in openHeap]:\n prev[neighbor] = cur\n g[neighbor] = tentGscore\n f[neighbor] = tentGscore + heuristic(neighbor, goal)\n heappush(openHeap, (f[neighbor], neighbor))\n return None\n\n\n<import token>\n<function token>\n",
"<import token>\n\n\ndef almostEqual(x, y):\n return abs(x - y) < 10 ** -4\n\n\nclass Vector(object):\n\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n\n def add(self, other):\n return Vector(self.x + other.x, self.y + other.y)\n\n def minus(self, other):\n return Vector(self.x - other.x, self.y - other.y)\n\n def magnitude(self):\n norm = math.sqrt(self.x ** 2 + self.y ** 2)\n return norm\n\n def normalize(self):\n if self.isZero():\n return Vector()\n norm = self.magnitude()\n normalizedX = self.x / norm\n normalizedY = self.y / norm\n return Vector(normalizedX, normalizedY)\n\n def multiply(self, factor):\n return Vector(self.x * factor, self.y * factor)\n\n def distanceSquared(self, other):\n return (self.x - other.x) ** 2 + (self.y - other.y) ** 2\n\n def __repr__(self):\n return '%f,%f' % (self.x, self.y)\n\n def __eq__(self, other):\n return isinstance(other, Vector\n ) and self.x == other.x and self.y == other.y\n\n def toTuple(self):\n return self.x, self.y\n\n def fromString(inputString):\n split = inputString.split(',')\n x = float(split[0])\n y = float(split[1])\n return Vector(x, y)\n\n def fromTuple(inputTuple):\n x, y = inputTuple\n return Vector(x, y)\n\n def roundToInteger(self):\n x = int(round(self.x))\n y = int(round(self.y))\n return Vector(x, y)\n\n def isZero(self):\n return almostEqual(self.x, 0) and almostEqual(self.y, 0)\n\n def toDict(self):\n rawDict = {}\n rawDict['x'] = self.x\n rawDict['y'] = self.y\n return rawDict\n\n def toJson(self):\n return json.dumps(self.toDict())\n\n def __hash__(self):\n return hash((self.x, self.y))\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n\n def containsPoint(self, v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n\n def __repr__(self):\n return str((self.minV, self.maxV))\n\n\n<function token>\n<assignment token>\n<function token>\n\n\ndef findPathAndSet(goal, currentLocation):\n mapdata = goal.game.map\n startVector = currentLocation\n goalVector = goal.goalLocation\n naivePath = findNaivePath(mapdata, startVector, goalVector)\n if naivePath != -1 and naivePath != None:\n goal.path = naivePath\n goal.pathI = 1\n return\n doJPSAsync(goal, mapdata, startVector, goalVector)\n\n\ndef run_async(func):\n from threading import Thread\n from functools import wraps\n\n @wraps(func)\n def async_func(*args, **kwargs):\n func_hl = Thread(target=func, args=args, kwargs=kwargs)\n func_hl.start()\n return func_hl\n return async_func\n\n\n<function token>\n\n\ndef findNaivePath(mapdata, startVector, goalVector):\n collision = mapdata.collision.array\n start = startVector.multiply(1 / mapdata.collisionFactor).roundToInteger()\n goal = goalVector.multiply(1 / 
mapdata.collisionFactor).roundToInteger()\n if start == goal:\n return -1\n if not isValid(collision, start.toTuple()) or not isValid(collision,\n goal.toTuple()):\n return -1\n directionVector = goal.minus(start).normalize()\n current = start\n while current.distanceSquared(goal) > 1.2:\n current = current.add(directionVector)\n if not isValid(collision, current.roundToInteger().toTuple()):\n return -1\n return [startVector, goalVector]\n\n\ndef performJps(mapdata, startVector, goalVector):\n collision = mapdata.collision.array\n jps.pad_field(collision)\n start = startVector.multiply(1 / mapdata.collisionFactor).roundToInteger(\n ).toTuple()\n goal = goalVector.multiply(1 / mapdata.collisionFactor).roundToInteger(\n ).toTuple()\n startX, startY = start\n endX, endY = goal\n try:\n rawPath = jps.jps(collision, startX, startY, endX, endY)\n except:\n return -1\n path = []\n for coord in rawPath:\n path.append(Vector.fromTuple(coord).multiply(mapdata.collisionFactor))\n return path\n\n\n@timeout_decorator.timeout(0.15, use_signals=False)\ndef aStarPath(mapdata, startVector, goalVector):\n collision = mapdata.collision.array\n start = startVector.multiply(1 / mapdata.collisionFactor).roundToInteger(\n ).toTuple()\n goal = goalVector.multiply(1 / mapdata.collisionFactor).roundToInteger(\n ).toTuple()\n if not isValid(collision, goal):\n return None\n closeSet = set()\n prev = {}\n g = {start: 0}\n f = {start: heuristic(start, goal)}\n openHeap = []\n heappush(openHeap, (f[start], start))\n while openHeap:\n cur = heappop(openHeap)[1]\n if cur == goal:\n path = []\n while cur in prev:\n path.append(Vector.fromTuple(cur).multiply(mapdata.\n collisionFactor))\n cur = prev[cur]\n path = list(reversed(path))\n return path\n closeSet.add(cur)\n for oX, oY in dirs:\n neighbor = cur[0] + oX, cur[1] + oY\n tentGscore = g[cur] + heuristic(cur, neighbor)\n if neighbor[0] < 0 or neighbor[0] >= len(collision) or neighbor[1\n ] < 0 or neighbor[1] >= len(collision[0]) or not isValidMove(\n collision, cur, neighbor, (oX, oY)):\n continue\n if neighbor in closeSet and tentGscore > g.get(neighbor, 0):\n continue\n if tentGscore < g.get(neighbor, 0) or neighbor not in [i[1] for\n i in openHeap]:\n prev[neighbor] = cur\n g[neighbor] = tentGscore\n f[neighbor] = tentGscore + heuristic(neighbor, goal)\n heappush(openHeap, (f[neighbor], neighbor))\n return None\n\n\n<import token>\n<function token>\n",
"<import token>\n\n\ndef almostEqual(x, y):\n return abs(x - y) < 10 ** -4\n\n\nclass Vector(object):\n\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n\n def add(self, other):\n return Vector(self.x + other.x, self.y + other.y)\n\n def minus(self, other):\n return Vector(self.x - other.x, self.y - other.y)\n\n def magnitude(self):\n norm = math.sqrt(self.x ** 2 + self.y ** 2)\n return norm\n\n def normalize(self):\n if self.isZero():\n return Vector()\n norm = self.magnitude()\n normalizedX = self.x / norm\n normalizedY = self.y / norm\n return Vector(normalizedX, normalizedY)\n\n def multiply(self, factor):\n return Vector(self.x * factor, self.y * factor)\n\n def distanceSquared(self, other):\n return (self.x - other.x) ** 2 + (self.y - other.y) ** 2\n\n def __repr__(self):\n return '%f,%f' % (self.x, self.y)\n\n def __eq__(self, other):\n return isinstance(other, Vector\n ) and self.x == other.x and self.y == other.y\n\n def toTuple(self):\n return self.x, self.y\n\n def fromString(inputString):\n split = inputString.split(',')\n x = float(split[0])\n y = float(split[1])\n return Vector(x, y)\n\n def fromTuple(inputTuple):\n x, y = inputTuple\n return Vector(x, y)\n\n def roundToInteger(self):\n x = int(round(self.x))\n y = int(round(self.y))\n return Vector(x, y)\n\n def isZero(self):\n return almostEqual(self.x, 0) and almostEqual(self.y, 0)\n\n def toDict(self):\n rawDict = {}\n rawDict['x'] = self.x\n rawDict['y'] = self.y\n return rawDict\n\n def toJson(self):\n return json.dumps(self.toDict())\n\n def __hash__(self):\n return hash((self.x, self.y))\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n\n def containsPoint(self, v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n\n def __repr__(self):\n return str((self.minV, self.maxV))\n\n\n<function token>\n<assignment token>\n<function token>\n\n\ndef findPathAndSet(goal, currentLocation):\n mapdata = goal.game.map\n startVector = currentLocation\n goalVector = goal.goalLocation\n naivePath = findNaivePath(mapdata, startVector, goalVector)\n if naivePath != -1 and naivePath != None:\n goal.path = naivePath\n goal.pathI = 1\n return\n doJPSAsync(goal, mapdata, startVector, goalVector)\n\n\ndef run_async(func):\n from threading import Thread\n from functools import wraps\n\n @wraps(func)\n def async_func(*args, **kwargs):\n func_hl = Thread(target=func, args=args, kwargs=kwargs)\n func_hl.start()\n return func_hl\n return async_func\n\n\n<function token>\n\n\ndef findNaivePath(mapdata, startVector, goalVector):\n collision = mapdata.collision.array\n start = startVector.multiply(1 / mapdata.collisionFactor).roundToInteger()\n goal = goalVector.multiply(1 / 
mapdata.collisionFactor).roundToInteger()\n if start == goal:\n return -1\n if not isValid(collision, start.toTuple()) or not isValid(collision,\n goal.toTuple()):\n return -1\n directionVector = goal.minus(start).normalize()\n current = start\n while current.distanceSquared(goal) > 1.2:\n current = current.add(directionVector)\n if not isValid(collision, current.roundToInteger().toTuple()):\n return -1\n return [startVector, goalVector]\n\n\n<function token>\n\n\n@timeout_decorator.timeout(0.15, use_signals=False)\ndef aStarPath(mapdata, startVector, goalVector):\n collision = mapdata.collision.array\n start = startVector.multiply(1 / mapdata.collisionFactor).roundToInteger(\n ).toTuple()\n goal = goalVector.multiply(1 / mapdata.collisionFactor).roundToInteger(\n ).toTuple()\n if not isValid(collision, goal):\n return None\n closeSet = set()\n prev = {}\n g = {start: 0}\n f = {start: heuristic(start, goal)}\n openHeap = []\n heappush(openHeap, (f[start], start))\n while openHeap:\n cur = heappop(openHeap)[1]\n if cur == goal:\n path = []\n while cur in prev:\n path.append(Vector.fromTuple(cur).multiply(mapdata.\n collisionFactor))\n cur = prev[cur]\n path = list(reversed(path))\n return path\n closeSet.add(cur)\n for oX, oY in dirs:\n neighbor = cur[0] + oX, cur[1] + oY\n tentGscore = g[cur] + heuristic(cur, neighbor)\n if neighbor[0] < 0 or neighbor[0] >= len(collision) or neighbor[1\n ] < 0 or neighbor[1] >= len(collision[0]) or not isValidMove(\n collision, cur, neighbor, (oX, oY)):\n continue\n if neighbor in closeSet and tentGscore > g.get(neighbor, 0):\n continue\n if tentGscore < g.get(neighbor, 0) or neighbor not in [i[1] for\n i in openHeap]:\n prev[neighbor] = cur\n g[neighbor] = tentGscore\n f[neighbor] = tentGscore + heuristic(neighbor, goal)\n heappush(openHeap, (f[neighbor], neighbor))\n return None\n\n\n<import token>\n<function token>\n",
"<import token>\n\n\ndef almostEqual(x, y):\n return abs(x - y) < 10 ** -4\n\n\nclass Vector(object):\n\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n\n def add(self, other):\n return Vector(self.x + other.x, self.y + other.y)\n\n def minus(self, other):\n return Vector(self.x - other.x, self.y - other.y)\n\n def magnitude(self):\n norm = math.sqrt(self.x ** 2 + self.y ** 2)\n return norm\n\n def normalize(self):\n if self.isZero():\n return Vector()\n norm = self.magnitude()\n normalizedX = self.x / norm\n normalizedY = self.y / norm\n return Vector(normalizedX, normalizedY)\n\n def multiply(self, factor):\n return Vector(self.x * factor, self.y * factor)\n\n def distanceSquared(self, other):\n return (self.x - other.x) ** 2 + (self.y - other.y) ** 2\n\n def __repr__(self):\n return '%f,%f' % (self.x, self.y)\n\n def __eq__(self, other):\n return isinstance(other, Vector\n ) and self.x == other.x and self.y == other.y\n\n def toTuple(self):\n return self.x, self.y\n\n def fromString(inputString):\n split = inputString.split(',')\n x = float(split[0])\n y = float(split[1])\n return Vector(x, y)\n\n def fromTuple(inputTuple):\n x, y = inputTuple\n return Vector(x, y)\n\n def roundToInteger(self):\n x = int(round(self.x))\n y = int(round(self.y))\n return Vector(x, y)\n\n def isZero(self):\n return almostEqual(self.x, 0) and almostEqual(self.y, 0)\n\n def toDict(self):\n rawDict = {}\n rawDict['x'] = self.x\n rawDict['y'] = self.y\n return rawDict\n\n def toJson(self):\n return json.dumps(self.toDict())\n\n def __hash__(self):\n return hash((self.x, self.y))\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n\n def containsPoint(self, v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n\n def __repr__(self):\n return str((self.minV, self.maxV))\n\n\n<function token>\n<assignment token>\n<function token>\n\n\ndef findPathAndSet(goal, currentLocation):\n mapdata = goal.game.map\n startVector = currentLocation\n goalVector = goal.goalLocation\n naivePath = findNaivePath(mapdata, startVector, goalVector)\n if naivePath != -1 and naivePath != None:\n goal.path = naivePath\n goal.pathI = 1\n return\n doJPSAsync(goal, mapdata, startVector, goalVector)\n\n\ndef run_async(func):\n from threading import Thread\n from functools import wraps\n\n @wraps(func)\n def async_func(*args, **kwargs):\n func_hl = Thread(target=func, args=args, kwargs=kwargs)\n func_hl.start()\n return func_hl\n return async_func\n\n\n<function token>\n\n\ndef findNaivePath(mapdata, startVector, goalVector):\n collision = mapdata.collision.array\n start = startVector.multiply(1 / mapdata.collisionFactor).roundToInteger()\n goal = goalVector.multiply(1 / 
mapdata.collisionFactor).roundToInteger()\n if start == goal:\n return -1\n if not isValid(collision, start.toTuple()) or not isValid(collision,\n goal.toTuple()):\n return -1\n directionVector = goal.minus(start).normalize()\n current = start\n while current.distanceSquared(goal) > 1.2:\n current = current.add(directionVector)\n if not isValid(collision, current.roundToInteger().toTuple()):\n return -1\n return [startVector, goalVector]\n\n\n<function token>\n<function token>\n<import token>\n<function token>\n",
"<import token>\n\n\ndef almostEqual(x, y):\n return abs(x - y) < 10 ** -4\n\n\nclass Vector(object):\n\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n\n def add(self, other):\n return Vector(self.x + other.x, self.y + other.y)\n\n def minus(self, other):\n return Vector(self.x - other.x, self.y - other.y)\n\n def magnitude(self):\n norm = math.sqrt(self.x ** 2 + self.y ** 2)\n return norm\n\n def normalize(self):\n if self.isZero():\n return Vector()\n norm = self.magnitude()\n normalizedX = self.x / norm\n normalizedY = self.y / norm\n return Vector(normalizedX, normalizedY)\n\n def multiply(self, factor):\n return Vector(self.x * factor, self.y * factor)\n\n def distanceSquared(self, other):\n return (self.x - other.x) ** 2 + (self.y - other.y) ** 2\n\n def __repr__(self):\n return '%f,%f' % (self.x, self.y)\n\n def __eq__(self, other):\n return isinstance(other, Vector\n ) and self.x == other.x and self.y == other.y\n\n def toTuple(self):\n return self.x, self.y\n\n def fromString(inputString):\n split = inputString.split(',')\n x = float(split[0])\n y = float(split[1])\n return Vector(x, y)\n\n def fromTuple(inputTuple):\n x, y = inputTuple\n return Vector(x, y)\n\n def roundToInteger(self):\n x = int(round(self.x))\n y = int(round(self.y))\n return Vector(x, y)\n\n def isZero(self):\n return almostEqual(self.x, 0) and almostEqual(self.y, 0)\n\n def toDict(self):\n rawDict = {}\n rawDict['x'] = self.x\n rawDict['y'] = self.y\n return rawDict\n\n def toJson(self):\n return json.dumps(self.toDict())\n\n def __hash__(self):\n return hash((self.x, self.y))\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n\n def containsPoint(self, v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n\n def __repr__(self):\n return str((self.minV, self.maxV))\n\n\n<function token>\n<assignment token>\n<function token>\n\n\ndef findPathAndSet(goal, currentLocation):\n mapdata = goal.game.map\n startVector = currentLocation\n goalVector = goal.goalLocation\n naivePath = findNaivePath(mapdata, startVector, goalVector)\n if naivePath != -1 and naivePath != None:\n goal.path = naivePath\n goal.pathI = 1\n return\n doJPSAsync(goal, mapdata, startVector, goalVector)\n\n\ndef run_async(func):\n from threading import Thread\n from functools import wraps\n\n @wraps(func)\n def async_func(*args, **kwargs):\n func_hl = Thread(target=func, args=args, kwargs=kwargs)\n func_hl.start()\n return func_hl\n return async_func\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n",
"<import token>\n<function token>\n\n\nclass Vector(object):\n\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n\n def add(self, other):\n return Vector(self.x + other.x, self.y + other.y)\n\n def minus(self, other):\n return Vector(self.x - other.x, self.y - other.y)\n\n def magnitude(self):\n norm = math.sqrt(self.x ** 2 + self.y ** 2)\n return norm\n\n def normalize(self):\n if self.isZero():\n return Vector()\n norm = self.magnitude()\n normalizedX = self.x / norm\n normalizedY = self.y / norm\n return Vector(normalizedX, normalizedY)\n\n def multiply(self, factor):\n return Vector(self.x * factor, self.y * factor)\n\n def distanceSquared(self, other):\n return (self.x - other.x) ** 2 + (self.y - other.y) ** 2\n\n def __repr__(self):\n return '%f,%f' % (self.x, self.y)\n\n def __eq__(self, other):\n return isinstance(other, Vector\n ) and self.x == other.x and self.y == other.y\n\n def toTuple(self):\n return self.x, self.y\n\n def fromString(inputString):\n split = inputString.split(',')\n x = float(split[0])\n y = float(split[1])\n return Vector(x, y)\n\n def fromTuple(inputTuple):\n x, y = inputTuple\n return Vector(x, y)\n\n def roundToInteger(self):\n x = int(round(self.x))\n y = int(round(self.y))\n return Vector(x, y)\n\n def isZero(self):\n return almostEqual(self.x, 0) and almostEqual(self.y, 0)\n\n def toDict(self):\n rawDict = {}\n rawDict['x'] = self.x\n rawDict['y'] = self.y\n return rawDict\n\n def toJson(self):\n return json.dumps(self.toDict())\n\n def __hash__(self):\n return hash((self.x, self.y))\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n\n def containsPoint(self, v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n\n def __repr__(self):\n return str((self.minV, self.maxV))\n\n\n<function token>\n<assignment token>\n<function token>\n\n\ndef findPathAndSet(goal, currentLocation):\n mapdata = goal.game.map\n startVector = currentLocation\n goalVector = goal.goalLocation\n naivePath = findNaivePath(mapdata, startVector, goalVector)\n if naivePath != -1 and naivePath != None:\n goal.path = naivePath\n goal.pathI = 1\n return\n doJPSAsync(goal, mapdata, startVector, goalVector)\n\n\ndef run_async(func):\n from threading import Thread\n from functools import wraps\n\n @wraps(func)\n def async_func(*args, **kwargs):\n func_hl = Thread(target=func, args=args, kwargs=kwargs)\n func_hl.start()\n return func_hl\n return async_func\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n",
"<import token>\n<function token>\n\n\nclass Vector(object):\n\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n\n def add(self, other):\n return Vector(self.x + other.x, self.y + other.y)\n\n def minus(self, other):\n return Vector(self.x - other.x, self.y - other.y)\n\n def magnitude(self):\n norm = math.sqrt(self.x ** 2 + self.y ** 2)\n return norm\n\n def normalize(self):\n if self.isZero():\n return Vector()\n norm = self.magnitude()\n normalizedX = self.x / norm\n normalizedY = self.y / norm\n return Vector(normalizedX, normalizedY)\n\n def multiply(self, factor):\n return Vector(self.x * factor, self.y * factor)\n\n def distanceSquared(self, other):\n return (self.x - other.x) ** 2 + (self.y - other.y) ** 2\n\n def __repr__(self):\n return '%f,%f' % (self.x, self.y)\n\n def __eq__(self, other):\n return isinstance(other, Vector\n ) and self.x == other.x and self.y == other.y\n\n def toTuple(self):\n return self.x, self.y\n\n def fromString(inputString):\n split = inputString.split(',')\n x = float(split[0])\n y = float(split[1])\n return Vector(x, y)\n\n def fromTuple(inputTuple):\n x, y = inputTuple\n return Vector(x, y)\n\n def roundToInteger(self):\n x = int(round(self.x))\n y = int(round(self.y))\n return Vector(x, y)\n\n def isZero(self):\n return almostEqual(self.x, 0) and almostEqual(self.y, 0)\n\n def toDict(self):\n rawDict = {}\n rawDict['x'] = self.x\n rawDict['y'] = self.y\n return rawDict\n\n def toJson(self):\n return json.dumps(self.toDict())\n\n def __hash__(self):\n return hash((self.x, self.y))\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n\n def containsPoint(self, v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n\n def __repr__(self):\n return str((self.minV, self.maxV))\n\n\n<function token>\n<assignment token>\n<function token>\n\n\ndef findPathAndSet(goal, currentLocation):\n mapdata = goal.game.map\n startVector = currentLocation\n goalVector = goal.goalLocation\n naivePath = findNaivePath(mapdata, startVector, goalVector)\n if naivePath != -1 and naivePath != None:\n goal.path = naivePath\n goal.pathI = 1\n return\n doJPSAsync(goal, mapdata, startVector, goalVector)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n",
"<import token>\n<function token>\n\n\nclass Vector(object):\n\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n\n def add(self, other):\n return Vector(self.x + other.x, self.y + other.y)\n\n def minus(self, other):\n return Vector(self.x - other.x, self.y - other.y)\n\n def magnitude(self):\n norm = math.sqrt(self.x ** 2 + self.y ** 2)\n return norm\n\n def normalize(self):\n if self.isZero():\n return Vector()\n norm = self.magnitude()\n normalizedX = self.x / norm\n normalizedY = self.y / norm\n return Vector(normalizedX, normalizedY)\n\n def multiply(self, factor):\n return Vector(self.x * factor, self.y * factor)\n\n def distanceSquared(self, other):\n return (self.x - other.x) ** 2 + (self.y - other.y) ** 2\n\n def __repr__(self):\n return '%f,%f' % (self.x, self.y)\n\n def __eq__(self, other):\n return isinstance(other, Vector\n ) and self.x == other.x and self.y == other.y\n\n def toTuple(self):\n return self.x, self.y\n\n def fromString(inputString):\n split = inputString.split(',')\n x = float(split[0])\n y = float(split[1])\n return Vector(x, y)\n\n def fromTuple(inputTuple):\n x, y = inputTuple\n return Vector(x, y)\n\n def roundToInteger(self):\n x = int(round(self.x))\n y = int(round(self.y))\n return Vector(x, y)\n\n def isZero(self):\n return almostEqual(self.x, 0) and almostEqual(self.y, 0)\n\n def toDict(self):\n rawDict = {}\n rawDict['x'] = self.x\n rawDict['y'] = self.y\n return rawDict\n\n def toJson(self):\n return json.dumps(self.toDict())\n\n def __hash__(self):\n return hash((self.x, self.y))\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n\n def containsPoint(self, v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n\n def __repr__(self):\n return str((self.minV, self.maxV))\n\n\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n",
"<import token>\n<function token>\n\n\nclass Vector(object):\n\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n\n def add(self, other):\n return Vector(self.x + other.x, self.y + other.y)\n\n def minus(self, other):\n return Vector(self.x - other.x, self.y - other.y)\n <function token>\n\n def normalize(self):\n if self.isZero():\n return Vector()\n norm = self.magnitude()\n normalizedX = self.x / norm\n normalizedY = self.y / norm\n return Vector(normalizedX, normalizedY)\n\n def multiply(self, factor):\n return Vector(self.x * factor, self.y * factor)\n\n def distanceSquared(self, other):\n return (self.x - other.x) ** 2 + (self.y - other.y) ** 2\n\n def __repr__(self):\n return '%f,%f' % (self.x, self.y)\n\n def __eq__(self, other):\n return isinstance(other, Vector\n ) and self.x == other.x and self.y == other.y\n\n def toTuple(self):\n return self.x, self.y\n\n def fromString(inputString):\n split = inputString.split(',')\n x = float(split[0])\n y = float(split[1])\n return Vector(x, y)\n\n def fromTuple(inputTuple):\n x, y = inputTuple\n return Vector(x, y)\n\n def roundToInteger(self):\n x = int(round(self.x))\n y = int(round(self.y))\n return Vector(x, y)\n\n def isZero(self):\n return almostEqual(self.x, 0) and almostEqual(self.y, 0)\n\n def toDict(self):\n rawDict = {}\n rawDict['x'] = self.x\n rawDict['y'] = self.y\n return rawDict\n\n def toJson(self):\n return json.dumps(self.toDict())\n\n def __hash__(self):\n return hash((self.x, self.y))\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n\n def containsPoint(self, v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n\n def __repr__(self):\n return str((self.minV, self.maxV))\n\n\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n",
"<import token>\n<function token>\n\n\nclass Vector(object):\n\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n\n def add(self, other):\n return Vector(self.x + other.x, self.y + other.y)\n\n def minus(self, other):\n return Vector(self.x - other.x, self.y - other.y)\n <function token>\n\n def normalize(self):\n if self.isZero():\n return Vector()\n norm = self.magnitude()\n normalizedX = self.x / norm\n normalizedY = self.y / norm\n return Vector(normalizedX, normalizedY)\n\n def multiply(self, factor):\n return Vector(self.x * factor, self.y * factor)\n\n def distanceSquared(self, other):\n return (self.x - other.x) ** 2 + (self.y - other.y) ** 2\n\n def __repr__(self):\n return '%f,%f' % (self.x, self.y)\n\n def __eq__(self, other):\n return isinstance(other, Vector\n ) and self.x == other.x and self.y == other.y\n\n def toTuple(self):\n return self.x, self.y\n\n def fromString(inputString):\n split = inputString.split(',')\n x = float(split[0])\n y = float(split[1])\n return Vector(x, y)\n <function token>\n\n def roundToInteger(self):\n x = int(round(self.x))\n y = int(round(self.y))\n return Vector(x, y)\n\n def isZero(self):\n return almostEqual(self.x, 0) and almostEqual(self.y, 0)\n\n def toDict(self):\n rawDict = {}\n rawDict['x'] = self.x\n rawDict['y'] = self.y\n return rawDict\n\n def toJson(self):\n return json.dumps(self.toDict())\n\n def __hash__(self):\n return hash((self.x, self.y))\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n\n def containsPoint(self, v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n\n def __repr__(self):\n return str((self.minV, self.maxV))\n\n\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n",
"<import token>\n<function token>\n\n\nclass Vector(object):\n\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n\n def add(self, other):\n return Vector(self.x + other.x, self.y + other.y)\n\n def minus(self, other):\n return Vector(self.x - other.x, self.y - other.y)\n <function token>\n\n def normalize(self):\n if self.isZero():\n return Vector()\n norm = self.magnitude()\n normalizedX = self.x / norm\n normalizedY = self.y / norm\n return Vector(normalizedX, normalizedY)\n\n def multiply(self, factor):\n return Vector(self.x * factor, self.y * factor)\n\n def distanceSquared(self, other):\n return (self.x - other.x) ** 2 + (self.y - other.y) ** 2\n\n def __repr__(self):\n return '%f,%f' % (self.x, self.y)\n\n def __eq__(self, other):\n return isinstance(other, Vector\n ) and self.x == other.x and self.y == other.y\n\n def toTuple(self):\n return self.x, self.y\n\n def fromString(inputString):\n split = inputString.split(',')\n x = float(split[0])\n y = float(split[1])\n return Vector(x, y)\n <function token>\n\n def roundToInteger(self):\n x = int(round(self.x))\n y = int(round(self.y))\n return Vector(x, y)\n\n def isZero(self):\n return almostEqual(self.x, 0) and almostEqual(self.y, 0)\n\n def toDict(self):\n rawDict = {}\n rawDict['x'] = self.x\n rawDict['y'] = self.y\n return rawDict\n\n def toJson(self):\n return json.dumps(self.toDict())\n <function token>\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n\n def containsPoint(self, v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n\n def __repr__(self):\n return str((self.minV, self.maxV))\n\n\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n",
"<import token>\n<function token>\n\n\nclass Vector(object):\n\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n\n def add(self, other):\n return Vector(self.x + other.x, self.y + other.y)\n <function token>\n <function token>\n\n def normalize(self):\n if self.isZero():\n return Vector()\n norm = self.magnitude()\n normalizedX = self.x / norm\n normalizedY = self.y / norm\n return Vector(normalizedX, normalizedY)\n\n def multiply(self, factor):\n return Vector(self.x * factor, self.y * factor)\n\n def distanceSquared(self, other):\n return (self.x - other.x) ** 2 + (self.y - other.y) ** 2\n\n def __repr__(self):\n return '%f,%f' % (self.x, self.y)\n\n def __eq__(self, other):\n return isinstance(other, Vector\n ) and self.x == other.x and self.y == other.y\n\n def toTuple(self):\n return self.x, self.y\n\n def fromString(inputString):\n split = inputString.split(',')\n x = float(split[0])\n y = float(split[1])\n return Vector(x, y)\n <function token>\n\n def roundToInteger(self):\n x = int(round(self.x))\n y = int(round(self.y))\n return Vector(x, y)\n\n def isZero(self):\n return almostEqual(self.x, 0) and almostEqual(self.y, 0)\n\n def toDict(self):\n rawDict = {}\n rawDict['x'] = self.x\n rawDict['y'] = self.y\n return rawDict\n\n def toJson(self):\n return json.dumps(self.toDict())\n <function token>\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n\n def containsPoint(self, v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n\n def __repr__(self):\n return str((self.minV, self.maxV))\n\n\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n",
"<import token>\n<function token>\n\n\nclass Vector(object):\n\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n <function token>\n <function token>\n <function token>\n\n def normalize(self):\n if self.isZero():\n return Vector()\n norm = self.magnitude()\n normalizedX = self.x / norm\n normalizedY = self.y / norm\n return Vector(normalizedX, normalizedY)\n\n def multiply(self, factor):\n return Vector(self.x * factor, self.y * factor)\n\n def distanceSquared(self, other):\n return (self.x - other.x) ** 2 + (self.y - other.y) ** 2\n\n def __repr__(self):\n return '%f,%f' % (self.x, self.y)\n\n def __eq__(self, other):\n return isinstance(other, Vector\n ) and self.x == other.x and self.y == other.y\n\n def toTuple(self):\n return self.x, self.y\n\n def fromString(inputString):\n split = inputString.split(',')\n x = float(split[0])\n y = float(split[1])\n return Vector(x, y)\n <function token>\n\n def roundToInteger(self):\n x = int(round(self.x))\n y = int(round(self.y))\n return Vector(x, y)\n\n def isZero(self):\n return almostEqual(self.x, 0) and almostEqual(self.y, 0)\n\n def toDict(self):\n rawDict = {}\n rawDict['x'] = self.x\n rawDict['y'] = self.y\n return rawDict\n\n def toJson(self):\n return json.dumps(self.toDict())\n <function token>\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n\n def containsPoint(self, v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n\n def __repr__(self):\n return str((self.minV, self.maxV))\n\n\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n",
"<import token>\n<function token>\n\n\nclass Vector(object):\n\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n <function token>\n <function token>\n <function token>\n\n def normalize(self):\n if self.isZero():\n return Vector()\n norm = self.magnitude()\n normalizedX = self.x / norm\n normalizedY = self.y / norm\n return Vector(normalizedX, normalizedY)\n\n def multiply(self, factor):\n return Vector(self.x * factor, self.y * factor)\n\n def distanceSquared(self, other):\n return (self.x - other.x) ** 2 + (self.y - other.y) ** 2\n <function token>\n\n def __eq__(self, other):\n return isinstance(other, Vector\n ) and self.x == other.x and self.y == other.y\n\n def toTuple(self):\n return self.x, self.y\n\n def fromString(inputString):\n split = inputString.split(',')\n x = float(split[0])\n y = float(split[1])\n return Vector(x, y)\n <function token>\n\n def roundToInteger(self):\n x = int(round(self.x))\n y = int(round(self.y))\n return Vector(x, y)\n\n def isZero(self):\n return almostEqual(self.x, 0) and almostEqual(self.y, 0)\n\n def toDict(self):\n rawDict = {}\n rawDict['x'] = self.x\n rawDict['y'] = self.y\n return rawDict\n\n def toJson(self):\n return json.dumps(self.toDict())\n <function token>\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n\n def containsPoint(self, v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n\n def __repr__(self):\n return str((self.minV, self.maxV))\n\n\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n",
"<import token>\n<function token>\n\n\nclass Vector(object):\n\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n <function token>\n <function token>\n <function token>\n\n def normalize(self):\n if self.isZero():\n return Vector()\n norm = self.magnitude()\n normalizedX = self.x / norm\n normalizedY = self.y / norm\n return Vector(normalizedX, normalizedY)\n\n def multiply(self, factor):\n return Vector(self.x * factor, self.y * factor)\n\n def distanceSquared(self, other):\n return (self.x - other.x) ** 2 + (self.y - other.y) ** 2\n <function token>\n <function token>\n\n def toTuple(self):\n return self.x, self.y\n\n def fromString(inputString):\n split = inputString.split(',')\n x = float(split[0])\n y = float(split[1])\n return Vector(x, y)\n <function token>\n\n def roundToInteger(self):\n x = int(round(self.x))\n y = int(round(self.y))\n return Vector(x, y)\n\n def isZero(self):\n return almostEqual(self.x, 0) and almostEqual(self.y, 0)\n\n def toDict(self):\n rawDict = {}\n rawDict['x'] = self.x\n rawDict['y'] = self.y\n return rawDict\n\n def toJson(self):\n return json.dumps(self.toDict())\n <function token>\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n\n def containsPoint(self, v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n\n def __repr__(self):\n return str((self.minV, self.maxV))\n\n\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n",
"<import token>\n<function token>\n\n\nclass Vector(object):\n\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n <function token>\n <function token>\n <function token>\n\n def normalize(self):\n if self.isZero():\n return Vector()\n norm = self.magnitude()\n normalizedX = self.x / norm\n normalizedY = self.y / norm\n return Vector(normalizedX, normalizedY)\n <function token>\n\n def distanceSquared(self, other):\n return (self.x - other.x) ** 2 + (self.y - other.y) ** 2\n <function token>\n <function token>\n\n def toTuple(self):\n return self.x, self.y\n\n def fromString(inputString):\n split = inputString.split(',')\n x = float(split[0])\n y = float(split[1])\n return Vector(x, y)\n <function token>\n\n def roundToInteger(self):\n x = int(round(self.x))\n y = int(round(self.y))\n return Vector(x, y)\n\n def isZero(self):\n return almostEqual(self.x, 0) and almostEqual(self.y, 0)\n\n def toDict(self):\n rawDict = {}\n rawDict['x'] = self.x\n rawDict['y'] = self.y\n return rawDict\n\n def toJson(self):\n return json.dumps(self.toDict())\n <function token>\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n\n def containsPoint(self, v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n\n def __repr__(self):\n return str((self.minV, self.maxV))\n\n\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n",
"<import token>\n<function token>\n\n\nclass Vector(object):\n\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n <function token>\n <function token>\n <function token>\n\n def normalize(self):\n if self.isZero():\n return Vector()\n norm = self.magnitude()\n normalizedX = self.x / norm\n normalizedY = self.y / norm\n return Vector(normalizedX, normalizedY)\n <function token>\n <function token>\n <function token>\n <function token>\n\n def toTuple(self):\n return self.x, self.y\n\n def fromString(inputString):\n split = inputString.split(',')\n x = float(split[0])\n y = float(split[1])\n return Vector(x, y)\n <function token>\n\n def roundToInteger(self):\n x = int(round(self.x))\n y = int(round(self.y))\n return Vector(x, y)\n\n def isZero(self):\n return almostEqual(self.x, 0) and almostEqual(self.y, 0)\n\n def toDict(self):\n rawDict = {}\n rawDict['x'] = self.x\n rawDict['y'] = self.y\n return rawDict\n\n def toJson(self):\n return json.dumps(self.toDict())\n <function token>\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n\n def containsPoint(self, v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n\n def __repr__(self):\n return str((self.minV, self.maxV))\n\n\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n",
"<import token>\n<function token>\n\n\nclass Vector(object):\n\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n <function token>\n <function token>\n <function token>\n\n def normalize(self):\n if self.isZero():\n return Vector()\n norm = self.magnitude()\n normalizedX = self.x / norm\n normalizedY = self.y / norm\n return Vector(normalizedX, normalizedY)\n <function token>\n <function token>\n <function token>\n <function token>\n\n def toTuple(self):\n return self.x, self.y\n\n def fromString(inputString):\n split = inputString.split(',')\n x = float(split[0])\n y = float(split[1])\n return Vector(x, y)\n <function token>\n\n def roundToInteger(self):\n x = int(round(self.x))\n y = int(round(self.y))\n return Vector(x, y)\n <function token>\n\n def toDict(self):\n rawDict = {}\n rawDict['x'] = self.x\n rawDict['y'] = self.y\n return rawDict\n\n def toJson(self):\n return json.dumps(self.toDict())\n <function token>\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n\n def containsPoint(self, v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n\n def __repr__(self):\n return str((self.minV, self.maxV))\n\n\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n",
"<import token>\n<function token>\n\n\nclass Vector(object):\n\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n <function token>\n <function token>\n <function token>\n\n def normalize(self):\n if self.isZero():\n return Vector()\n norm = self.magnitude()\n normalizedX = self.x / norm\n normalizedY = self.y / norm\n return Vector(normalizedX, normalizedY)\n <function token>\n <function token>\n <function token>\n <function token>\n\n def toTuple(self):\n return self.x, self.y\n\n def fromString(inputString):\n split = inputString.split(',')\n x = float(split[0])\n y = float(split[1])\n return Vector(x, y)\n <function token>\n\n def roundToInteger(self):\n x = int(round(self.x))\n y = int(round(self.y))\n return Vector(x, y)\n <function token>\n\n def toDict(self):\n rawDict = {}\n rawDict['x'] = self.x\n rawDict['y'] = self.y\n return rawDict\n <function token>\n <function token>\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n\n def containsPoint(self, v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n\n def __repr__(self):\n return str((self.minV, self.maxV))\n\n\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n",
"<import token>\n<function token>\n\n\nclass Vector(object):\n\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n <function token>\n <function token>\n <function token>\n\n def normalize(self):\n if self.isZero():\n return Vector()\n norm = self.magnitude()\n normalizedX = self.x / norm\n normalizedY = self.y / norm\n return Vector(normalizedX, normalizedY)\n <function token>\n <function token>\n <function token>\n <function token>\n\n def toTuple(self):\n return self.x, self.y\n\n def fromString(inputString):\n split = inputString.split(',')\n x = float(split[0])\n y = float(split[1])\n return Vector(x, y)\n <function token>\n <function token>\n <function token>\n\n def toDict(self):\n rawDict = {}\n rawDict['x'] = self.x\n rawDict['y'] = self.y\n return rawDict\n <function token>\n <function token>\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n\n def containsPoint(self, v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n\n def __repr__(self):\n return str((self.minV, self.maxV))\n\n\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n",
"<import token>\n<function token>\n\n\nclass Vector(object):\n <function token>\n <function token>\n <function token>\n <function token>\n\n def normalize(self):\n if self.isZero():\n return Vector()\n norm = self.magnitude()\n normalizedX = self.x / norm\n normalizedY = self.y / norm\n return Vector(normalizedX, normalizedY)\n <function token>\n <function token>\n <function token>\n <function token>\n\n def toTuple(self):\n return self.x, self.y\n\n def fromString(inputString):\n split = inputString.split(',')\n x = float(split[0])\n y = float(split[1])\n return Vector(x, y)\n <function token>\n <function token>\n <function token>\n\n def toDict(self):\n rawDict = {}\n rawDict['x'] = self.x\n rawDict['y'] = self.y\n return rawDict\n <function token>\n <function token>\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n\n def containsPoint(self, v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n\n def __repr__(self):\n return str((self.minV, self.maxV))\n\n\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n",
"<import token>\n<function token>\n\n\nclass Vector(object):\n <function token>\n <function token>\n <function token>\n <function token>\n\n def normalize(self):\n if self.isZero():\n return Vector()\n norm = self.magnitude()\n normalizedX = self.x / norm\n normalizedY = self.y / norm\n return Vector(normalizedX, normalizedY)\n <function token>\n <function token>\n <function token>\n <function token>\n\n def toTuple(self):\n return self.x, self.y\n\n def fromString(inputString):\n split = inputString.split(',')\n x = float(split[0])\n y = float(split[1])\n return Vector(x, y)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n\n def containsPoint(self, v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n\n def __repr__(self):\n return str((self.minV, self.maxV))\n\n\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n",
"<import token>\n<function token>\n\n\nclass Vector(object):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def toTuple(self):\n return self.x, self.y\n\n def fromString(inputString):\n split = inputString.split(',')\n x = float(split[0])\n y = float(split[1])\n return Vector(x, y)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n\n def containsPoint(self, v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n\n def __repr__(self):\n return str((self.minV, self.maxV))\n\n\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n",
"<import token>\n<function token>\n\n\nclass Vector(object):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def toTuple(self):\n return self.x, self.y\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n\n def containsPoint(self, v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n\n def __repr__(self):\n return str((self.minV, self.maxV))\n\n\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n",
"<import token>\n<function token>\n\n\nclass Vector(object):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n\n def containsPoint(self, v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n\n def __repr__(self):\n return str((self.minV, self.maxV))\n\n\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n",
"<import token>\n<function token>\n<class token>\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n\n def containsPoint(self, v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n\n def __repr__(self):\n return str((self.minV, self.maxV))\n\n\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n",
"<import token>\n<function token>\n<class token>\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n\n def containsPoint(self, v):\n if v.x < self.minV.x or v.y < self.minV.y:\n return False\n if v.x > self.maxV.x or v.y > self.maxV.y:\n return False\n return True\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n <function token>\n\n\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n",
"<import token>\n<function token>\n<class token>\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n <function token>\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n\n def minTranslation(self, other):\n mt = Vector()\n left = other.minV.x - self.maxV.x\n right = other.maxV.x - self.minV.x\n top = other.minV.y - self.maxV.y\n bottom = other.maxV.y - self.minV.y\n if left > 0 or right < 0:\n return mt\n if top > 0 or bottom < 0:\n return mt\n if abs(left) < right:\n mt.x = left\n else:\n mt.x = right\n if abs(top) < bottom:\n mt.y = top\n else:\n mt.y = bottom\n if abs(mt.x) < abs(mt.y):\n mt.y = 0\n else:\n mt.x = 0\n return mt\n <function token>\n\n\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n",
"<import token>\n<function token>\n<class token>\n\n\nclass AABB(object):\n\n def __init__(self, minV, maxV):\n self.minV = minV\n self.maxV = maxV\n <function token>\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n <function token>\n <function token>\n\n\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n",
"<import token>\n<function token>\n<class token>\n\n\nclass AABB(object):\n <function token>\n <function token>\n\n def collides(self, other):\n if (self.maxV.x < other.minV.x or self.maxV.y < other.minV.y or \n self.minV.x > other.maxV.x or self.minV.y > other.maxV.y):\n return False\n return True\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n <function token>\n <function token>\n\n\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n",
"<import token>\n<function token>\n<class token>\n\n\nclass AABB(object):\n <function token>\n <function token>\n <function token>\n\n def center(self):\n return self.minV.add(self.maxV).multiply(0.5)\n <function token>\n <function token>\n\n\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n",
"<import token>\n<function token>\n<class token>\n\n\nclass AABB(object):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n",
"<import token>\n<function token>\n<class token>\n<class token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n"
] | false |
98,652 |
687eb006b3f63faf6793506ffc788cbc08f74343
|
from django.shortcuts import render#,render_to_response
from .forms import log_form
from django.http import HttpResponseRedirect
from .models import account
from django.contrib import messages
# from django.core import
# from .back_auth import auth
# from django.template import RequestContext
# Create your views here.
def login_view(request):
if(request.method == "POST"):
login_form = log_form(request.POST) # populate form with data
if(login_form.is_valid()):
# auths =auth()
inst = login_form.cleaned_data
# print(inst) ******* DEBUG
# au = auths.authenticate(inst['iam'],inst['alias'],inst['passwd'])
if((inst['passwd'] == account.objects.get(alias=inst['alias']).passwd)): #and (inst['iam'] in account.objects.get(iam=inst('iam')))):
request.session['cred']=[inst]
return HttpResponseRedirect('/account')
else:
messages.add_message(request,messages.ERROR,'Incorrect credentials or password')
return render(request,'login.html',{'form':login_form,'inc_cr':'Incorrect credentials or password'})
# return HttpResponseRedirect(reverse('login:login'))
else:
return render(request,'login.html',{'form':login_form,'inc_cr':'Incorrect credentials or password'})
else:
login_form = log_form()
# return render(request,'login.html')
return render(request,'login.html',{'form':login_form,'inc_cr':''})
|
[
"from django.shortcuts import render#,render_to_response\nfrom .forms import log_form\nfrom django.http import HttpResponseRedirect\nfrom .models import account\nfrom django.contrib import messages\n# from django.core import \n# from .back_auth import auth\n# from django.template import RequestContext \n\n# Create your views here.\ndef login_view(request):\n if(request.method == \"POST\"):\n login_form = log_form(request.POST) # populate form with data\n if(login_form.is_valid()):\n # auths =auth()\n inst = login_form.cleaned_data\n # print(inst) ******* DEBUG\n # au = auths.authenticate(inst['iam'],inst['alias'],inst['passwd'])\n if((inst['passwd'] == account.objects.get(alias=inst['alias']).passwd)): #and (inst['iam'] in account.objects.get(iam=inst('iam')))):\n request.session['cred']=[inst]\n return HttpResponseRedirect('/account')\n else:\n messages.add_message(request,messages.ERROR,'Incorrect credentials or password')\n return render(request,'login.html',{'form':login_form,'inc_cr':'Incorrect credentials or password'})\n # return HttpResponseRedirect(reverse('login:login'))\n else:\n return render(request,'login.html',{'form':login_form,'inc_cr':'Incorrect credentials or password'})\n\n else:\n login_form = log_form()\n # return render(request,'login.html')\n\n return render(request,'login.html',{'form':login_form,'inc_cr':''})",
"from django.shortcuts import render\nfrom .forms import log_form\nfrom django.http import HttpResponseRedirect\nfrom .models import account\nfrom django.contrib import messages\n\n\ndef login_view(request):\n if request.method == 'POST':\n login_form = log_form(request.POST)\n if login_form.is_valid():\n inst = login_form.cleaned_data\n if inst['passwd'] == account.objects.get(alias=inst['alias']\n ).passwd:\n request.session['cred'] = [inst]\n return HttpResponseRedirect('/account')\n else:\n messages.add_message(request, messages.ERROR,\n 'Incorrect credentials or password')\n return render(request, 'login.html', {'form': login_form,\n 'inc_cr': 'Incorrect credentials or password'})\n else:\n return render(request, 'login.html', {'form': login_form,\n 'inc_cr': 'Incorrect credentials or password'})\n else:\n login_form = log_form()\n return render(request, 'login.html', {'form': login_form, 'inc_cr': ''})\n",
"<import token>\n\n\ndef login_view(request):\n if request.method == 'POST':\n login_form = log_form(request.POST)\n if login_form.is_valid():\n inst = login_form.cleaned_data\n if inst['passwd'] == account.objects.get(alias=inst['alias']\n ).passwd:\n request.session['cred'] = [inst]\n return HttpResponseRedirect('/account')\n else:\n messages.add_message(request, messages.ERROR,\n 'Incorrect credentials or password')\n return render(request, 'login.html', {'form': login_form,\n 'inc_cr': 'Incorrect credentials or password'})\n else:\n return render(request, 'login.html', {'form': login_form,\n 'inc_cr': 'Incorrect credentials or password'})\n else:\n login_form = log_form()\n return render(request, 'login.html', {'form': login_form, 'inc_cr': ''})\n",
"<import token>\n<function token>\n"
] | false |
98,653 |
4fd7119e57c507974a7301601614f1b4445d1aff
|
#!/usr/bin/python
from sys import argv
# Script that prints the successive positions of one particle from positions.txt - generation of this file must first be enabled in the .cpp
if len(argv) == 2:
gr = open ("config.dat")
for l in gr.readlines():
a=l.split()
if len (a) < 3:
continue
if a[0] == "NATOMS":
natoms = int (a[2])
if len(argv) != 2 and len(argv) != 3:
print "Incorrect number of parameters!\nUsage: %s [which_molecule] -- default config.dat search for natoms" % argv[0]
print "OR: %s [natoms] [which_molecule]" %argv[0]
exit(1)
fp = open ("positions.txt")
#natoms = int(argv[1])
which = int(argv[2])
while True:
for i in range (natoms):
l = fp.readline().strip().split()
if int(l[0]) == which:
print l[1]+" "+l[2]+" "+l[3]
|
[
"#!/usr/bin/python\nfrom sys import argv\n\n# Skrypt wypisujący z pliku positions.txt kolejne polozenia 1 czasteczki - trzeba najpierw włączyć w .cpp generowanie tego pliku\n\nif len(argv) == 2:\n\tgr = open (\"config.dat\")\n\tfor l in gr.readlines():\n\t\ta=l.split()\n\t\tif len (a) < 3:\n\t\t\tcontinue\n\t\tif a[0] == \"NATOMS\":\n\t\t\tnatoms = int (a[2])\n\nif len(argv) != 2 and len(argv) != 3:\n\tprint \"Incorrect number of parameters!\\nUsage: %s [which_molecule] -- default config.dat search for natoms\" % argv[0]\n\tprint \"OR: %s [natoms] [which_molecule]\" %argv[0]\n\texit(1)\n\nfp = open (\"positions.txt\")\n\n#natoms = int(argv[1])\nwhich = int(argv[2])\n\nwhile True:\n\tfor i in range (natoms):\n\t\tl = fp.readline().strip().split()\n\t\tif int(l[0]) == which:\n\t\t\tprint l[1]+\" \"+l[2]+\" \"+l[3]\n\t\n"
] | true |
98,654 |
1f10f86910f01afcdf919333aca260c647449439
|
from django.conf.urls import include,url #se agrega include
from . import views
urlpatterns = [
#url(r'^$',views.listar_eventos, name='home'), #se redirecciona a la url del blog
url(r'^grado/nuevo/$', views.grado_nuevo, name = 'detEvento'), #se redirecciona a la url del blog
]
|
[
"from django.conf.urls import include,url #se agrega include\nfrom . import views\n\nurlpatterns = [\n #url(r'^$',views.listar_eventos, name='home'), #se redirecciona a la url del blog\n url(r'^grado/nuevo/$', views.grado_nuevo, name = 'detEvento'), #se redirecciona a la url del blog\n\n]\n",
"from django.conf.urls import include, url\nfrom . import views\nurlpatterns = [url('^grado/nuevo/$', views.grado_nuevo, name='detEvento')]\n",
"<import token>\nurlpatterns = [url('^grado/nuevo/$', views.grado_nuevo, name='detEvento')]\n",
"<import token>\n<assignment token>\n"
] | false |
98,655 |
c37f1bbe351ff7dc9ad947caf5e1bb67d06994ef
|
import os
import pickle
import numpy as np
#import tensorflow as tf
#from sklearn.metrics.pairwise import cosine_similarity
from scipy.spatial import distance
model_path = './model/'
loss_model = 'nce'
#loss_model = 'cross_entropy'
model_filepath = os.path.join(model_path, 'word2vec_%s.model'%(loss_model))
dictionary, steps, embeddings = pickle.load(open(model_filepath, 'rb'))
"""
==========================================================================
Write code to evaluate a relation between pairs of words.
You can access your trained model via dictionary and embeddings.
dictionary[word] will give you word_id
and embeddings[word_id] will return the embedding for that word.
word_id = dictionary[word]
v1 = embeddings[word_id]
or simply
v1 = embeddings[dictionary[word_id]]
==========================================================================
"""
filepath='./word_analogy_dev.txt'
#filepath1='./predicted_file_batch.txt'
example_set=[]
choices_set=[]
#cosine_sim=[]
f=open("test_nce.txt","w")
#avg_set=[]
with open (filepath) as fp:
contents = fp.readlines()
fp.close
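# For each analogy line: collect the embedding differences of the example pairs and reduce them
# with np.mean (no axis argument, so this collapses to a single scalar), then score every choice
# pair by the cosine similarity between its own difference vector and that mean; the least- and
# most-similar choice pairs are written at the end of the output line.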
for c in contents:
differences_exp=[]
c=c.replace("\"","")
c=c.replace("\n","")
examples=c.split('||')
#print(examples)
example_words=examples[0].split(',')
#print(example_words)
choice_words=examples[1].split(',')
#print(choice_words)
for i in example_words:
pairs=i.split(':')
#print(pairs[0])
differences_exp.append(np.subtract(embeddings[dictionary[pairs[0]]],embeddings[dictionary[pairs[1]]]))
avg=np.mean(differences_exp)
choices_sim=[]
for j in choice_words:
pairs_choice=j.split(':')
diff_choice=np.subtract(embeddings[dictionary[pairs_choice[0]]],embeddings[dictionary[pairs_choice[1]]])
cosine_sim=(1-(distance.cosine(diff_choice,avg)))
f.write('"{}"'.format(j)+ " ")
choices_sim.append(cosine_sim)
#print(choices_sim)
most_sim=(choice_words[choices_sim.index(max(choices_sim))])
least_sim=(choice_words[choices_sim.index(min(choices_sim))])
f.write('"{}"'.format(least_sim)+ " ")
f.write('"{}"'.format(most_sim))
f.write("\n")
f.close()
#
#
#
#
|
[
"import os\nimport pickle\nimport numpy as np\n#import tensorflow as tf\n#from sklearn.metrics.pairwise import cosine_similarity\nfrom scipy.spatial import distance\nmodel_path = './model/'\nloss_model = 'nce'\n#loss_model = 'cross_entropy'\n\nmodel_filepath = os.path.join(model_path, 'word2vec_%s.model'%(loss_model))\n\n\ndictionary, steps, embeddings = pickle.load(open(model_filepath, 'rb'))\n\n\"\"\"\n==========================================================================\n\nWrite code to evaluate a relation between pairs of words.\nYou can access your trained model via dictionary and embeddings.\ndictionary[word] will give you word_id\nand embeddings[word_id] will return the embedding for that word.\n\nword_id = dictionary[word]\nv1 = embeddings[word_id]\n\nor simply\n\nv1 = embeddings[dictionary[word_id]]\n\n==========================================================================\n\"\"\"\nfilepath='./word_analogy_dev.txt'\n#filepath1='./predicted_file_batch.txt'\nexample_set=[]\nchoices_set=[]\n#cosine_sim=[]\nf=open(\"test_nce.txt\",\"w\")\n#avg_set=[]\n\n\nwith open (filepath) as fp:\n contents = fp.readlines()\nfp.close\nfor c in contents:\n differences_exp=[]\n \n c=c.replace(\"\\\"\",\"\")\n c=c.replace(\"\\n\",\"\")\n examples=c.split('||')\n #print(examples)\n example_words=examples[0].split(',')\n #print(example_words)\n choice_words=examples[1].split(',')\n #print(choice_words)\n for i in example_words:\n \n pairs=i.split(':')\n #print(pairs[0])\n differences_exp.append(np.subtract(embeddings[dictionary[pairs[0]]],embeddings[dictionary[pairs[1]]]))\n avg=np.mean(differences_exp)\n choices_sim=[]\n for j in choice_words:\n pairs_choice=j.split(':')\n diff_choice=np.subtract(embeddings[dictionary[pairs_choice[0]]],embeddings[dictionary[pairs_choice[1]]])\n cosine_sim=(1-(distance.cosine(diff_choice,avg))) \n f.write('\"{}\"'.format(j)+ \" \")\n choices_sim.append(cosine_sim)\n #print(choices_sim)\n most_sim=(choice_words[choices_sim.index(max(choices_sim))])\n least_sim=(choice_words[choices_sim.index(min(choices_sim))])\n f.write('\"{}\"'.format(least_sim)+ \" \")\n f.write('\"{}\"'.format(most_sim))\n f.write(\"\\n\")\nf.close()\n# \n#\n# \n# \n ",
"import os\nimport pickle\nimport numpy as np\nfrom scipy.spatial import distance\nmodel_path = './model/'\nloss_model = 'nce'\nmodel_filepath = os.path.join(model_path, 'word2vec_%s.model' % loss_model)\ndictionary, steps, embeddings = pickle.load(open(model_filepath, 'rb'))\n<docstring token>\nfilepath = './word_analogy_dev.txt'\nexample_set = []\nchoices_set = []\nf = open('test_nce.txt', 'w')\nwith open(filepath) as fp:\n contents = fp.readlines()\nfp.close\nfor c in contents:\n differences_exp = []\n c = c.replace('\"', '')\n c = c.replace('\\n', '')\n examples = c.split('||')\n example_words = examples[0].split(',')\n choice_words = examples[1].split(',')\n for i in example_words:\n pairs = i.split(':')\n differences_exp.append(np.subtract(embeddings[dictionary[pairs[0]]],\n embeddings[dictionary[pairs[1]]]))\n avg = np.mean(differences_exp)\n choices_sim = []\n for j in choice_words:\n pairs_choice = j.split(':')\n diff_choice = np.subtract(embeddings[dictionary[pairs_choice[0]]],\n embeddings[dictionary[pairs_choice[1]]])\n cosine_sim = 1 - distance.cosine(diff_choice, avg)\n f.write('\"{}\"'.format(j) + ' ')\n choices_sim.append(cosine_sim)\n most_sim = choice_words[choices_sim.index(max(choices_sim))]\n least_sim = choice_words[choices_sim.index(min(choices_sim))]\n f.write('\"{}\"'.format(least_sim) + ' ')\n f.write('\"{}\"'.format(most_sim))\n f.write('\\n')\nf.close()\n",
"<import token>\nmodel_path = './model/'\nloss_model = 'nce'\nmodel_filepath = os.path.join(model_path, 'word2vec_%s.model' % loss_model)\ndictionary, steps, embeddings = pickle.load(open(model_filepath, 'rb'))\n<docstring token>\nfilepath = './word_analogy_dev.txt'\nexample_set = []\nchoices_set = []\nf = open('test_nce.txt', 'w')\nwith open(filepath) as fp:\n contents = fp.readlines()\nfp.close\nfor c in contents:\n differences_exp = []\n c = c.replace('\"', '')\n c = c.replace('\\n', '')\n examples = c.split('||')\n example_words = examples[0].split(',')\n choice_words = examples[1].split(',')\n for i in example_words:\n pairs = i.split(':')\n differences_exp.append(np.subtract(embeddings[dictionary[pairs[0]]],\n embeddings[dictionary[pairs[1]]]))\n avg = np.mean(differences_exp)\n choices_sim = []\n for j in choice_words:\n pairs_choice = j.split(':')\n diff_choice = np.subtract(embeddings[dictionary[pairs_choice[0]]],\n embeddings[dictionary[pairs_choice[1]]])\n cosine_sim = 1 - distance.cosine(diff_choice, avg)\n f.write('\"{}\"'.format(j) + ' ')\n choices_sim.append(cosine_sim)\n most_sim = choice_words[choices_sim.index(max(choices_sim))]\n least_sim = choice_words[choices_sim.index(min(choices_sim))]\n f.write('\"{}\"'.format(least_sim) + ' ')\n f.write('\"{}\"'.format(most_sim))\n f.write('\\n')\nf.close()\n",
"<import token>\n<assignment token>\n<docstring token>\n<assignment token>\nwith open(filepath) as fp:\n contents = fp.readlines()\nfp.close\nfor c in contents:\n differences_exp = []\n c = c.replace('\"', '')\n c = c.replace('\\n', '')\n examples = c.split('||')\n example_words = examples[0].split(',')\n choice_words = examples[1].split(',')\n for i in example_words:\n pairs = i.split(':')\n differences_exp.append(np.subtract(embeddings[dictionary[pairs[0]]],\n embeddings[dictionary[pairs[1]]]))\n avg = np.mean(differences_exp)\n choices_sim = []\n for j in choice_words:\n pairs_choice = j.split(':')\n diff_choice = np.subtract(embeddings[dictionary[pairs_choice[0]]],\n embeddings[dictionary[pairs_choice[1]]])\n cosine_sim = 1 - distance.cosine(diff_choice, avg)\n f.write('\"{}\"'.format(j) + ' ')\n choices_sim.append(cosine_sim)\n most_sim = choice_words[choices_sim.index(max(choices_sim))]\n least_sim = choice_words[choices_sim.index(min(choices_sim))]\n f.write('\"{}\"'.format(least_sim) + ' ')\n f.write('\"{}\"'.format(most_sim))\n f.write('\\n')\nf.close()\n",
"<import token>\n<assignment token>\n<docstring token>\n<assignment token>\n<code token>\n"
] | false |
98,656 |
b361223ee3bb02afe4801552d17d3ce7fef6d465
|
import io


def load(file_name):
"""
Returns a list which containing .opam file data line by line.
It opens file in read mode and split that line by line and
append it to he file_data.
"""
file_data = []
with io.open(file_name, "r", encoding="utf-8") as f:
file_data = [line.rstrip('\n') for line in f]
return file_data
def get_version(file_data):
"""
Return the value of opam-version.
"""
for individual in file_data:
if 'opam-version' in individual:
version = individual.split('"')
return version[1]
def get_maintainer(file_data):
"""
Return the value of maintainer.
"""
for individual in file_data:
if 'maintainer' in individual:
maintainer = individual.split('"')
return maintainer[1]
def get_synopsis(file_data):
"""
Return the value of synopsis.
"""
for individual in file_data:
if 'synopsis' in individual:
synopsis = individual.split('"')
return synopsis[1]
|
[
"def load(file_name):\n \"\"\"\n Returns a list which containing .opam file data line by line.\n It opens file in read mode and split that line by line and\n append it to he file_data.\n \"\"\"\n file_data = []\n with io.open(file_name, \"r\", encoding=\"utf-8\") as f:\n file_data = [line.rstrip('\\n') for line in f]\n return file_data\n\n\ndef get_version(file_data):\n \"\"\"\n Return the value of opam-version.\n \"\"\"\n for individual in file_data:\n if 'opam-version' in individual:\n version = individual.split('\"')\n return version[1]\n\n\ndef get_maintainer(file_data):\n \"\"\"\n Return the value of maintainer.\n \"\"\"\n for individual in file_data:\n if 'maintainer' in individual:\n maintainer = individual.split('\"')\n return maintainer[1]\n\n\ndef get_synopsis(file_data):\n \"\"\"\n Return the value of synopsis.\n \"\"\"\n for individual in file_data:\n if 'synopsis' in individual:\n synopsis = individual.split('\"')\n return synopsis[1]\n\n",
"def load(file_name):\n \"\"\"\n Returns a list which containing .opam file data line by line.\n It opens file in read mode and split that line by line and\n append it to he file_data.\n \"\"\"\n file_data = []\n with io.open(file_name, 'r', encoding='utf-8') as f:\n file_data = [line.rstrip('\\n') for line in f]\n return file_data\n\n\ndef get_version(file_data):\n \"\"\"\n Return the value of opam-version.\n \"\"\"\n for individual in file_data:\n if 'opam-version' in individual:\n version = individual.split('\"')\n return version[1]\n\n\ndef get_maintainer(file_data):\n \"\"\"\n Return the value of maintainer.\n \"\"\"\n for individual in file_data:\n if 'maintainer' in individual:\n maintainer = individual.split('\"')\n return maintainer[1]\n\n\ndef get_synopsis(file_data):\n \"\"\"\n Return the value of synopsis.\n \"\"\"\n for individual in file_data:\n if 'synopsis' in individual:\n synopsis = individual.split('\"')\n return synopsis[1]\n",
"<function token>\n\n\ndef get_version(file_data):\n \"\"\"\n Return the value of opam-version.\n \"\"\"\n for individual in file_data:\n if 'opam-version' in individual:\n version = individual.split('\"')\n return version[1]\n\n\ndef get_maintainer(file_data):\n \"\"\"\n Return the value of maintainer.\n \"\"\"\n for individual in file_data:\n if 'maintainer' in individual:\n maintainer = individual.split('\"')\n return maintainer[1]\n\n\ndef get_synopsis(file_data):\n \"\"\"\n Return the value of synopsis.\n \"\"\"\n for individual in file_data:\n if 'synopsis' in individual:\n synopsis = individual.split('\"')\n return synopsis[1]\n",
"<function token>\n\n\ndef get_version(file_data):\n \"\"\"\n Return the value of opam-version.\n \"\"\"\n for individual in file_data:\n if 'opam-version' in individual:\n version = individual.split('\"')\n return version[1]\n\n\ndef get_maintainer(file_data):\n \"\"\"\n Return the value of maintainer.\n \"\"\"\n for individual in file_data:\n if 'maintainer' in individual:\n maintainer = individual.split('\"')\n return maintainer[1]\n\n\n<function token>\n",
"<function token>\n\n\ndef get_version(file_data):\n \"\"\"\n Return the value of opam-version.\n \"\"\"\n for individual in file_data:\n if 'opam-version' in individual:\n version = individual.split('\"')\n return version[1]\n\n\n<function token>\n<function token>\n",
"<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
98,657 |
1821bdccf9a8a31b12dfb076ca9c0832771e2d3a
|
#https://pintia.cn/problem-sets/951072707007700992/problems/977489194356715520
# Implements the put operation of AVLTree
# The delete operation is not implemented
class Node:
def __init__(self,data):
self.data = data
self.left = None
self.right = None
self.height = 0
class AVLTree:
def __init__(self):
self.root = None
def height(self,node):
if node is None:
return -1
else:
return node.height
	# The node that breaks the balance is in the left subtree of the left child of the unbalanced node
def LLrotate(self,node):
temp = node.left
node.left = temp.right
temp.right = node
node.height = max(self.height(node.left),self.height(node.right))+1
temp.height = max(self.height(temp.left),self.height(node.right))+1
return temp
def RRrotate(self,node):
temp = node.right
node.right = temp.left
temp.left = node
node.height = max(self.height(node.left),self.height(node.right))+1
temp.height = max(self.height(node.left),self.height(node.right))+1
return temp
	# The node that breaks the balance is in the left subtree of the right child of the unbalanced node
	# Do an LL rotation first, then an RR rotation
def RLrotate(self,node):
node.right = self.LLrotate(node.right)
return self.RRrotate(node)
def LRrotare(self,node):
node.left = self.RRrotate(node.left)
return self.LLrotate(node)
def put(self,data):
if not self.root:
self.root = Node(data)
else:
self.root = self._put(data,self.root)
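	# Recursive insert: walk down to the insertion point, then rebalance and refresh heights on the way back up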
def _put(self,data,node):
if node is None:
node = Node(data)
elif data < node.data:
node.left = self._put(data,node.left)
if (self.height(node.left) - self.height(node.right)) == 2:
if data < node.left.data:
node = self.LLrotate(node)
else:
node = self.LRrotare(node)
elif data > node.data:
node.right = self._put(data,node.right)
if (self.height(node.right) - self.height(node.left)) == 2:
if data < node.right.data:
node = self.RLrotate(node)
else:
node = self.RRrotate(node)
node.height = max(self.height(node.left),self.height(node.right)) + 1
return node
def main():
N = input()
li = input().split()
tree = AVLTree()
for i in li:
tree.put(int(i))
print(tree.root.data)
main()
|
[
"#https://pintia.cn/problem-sets/951072707007700992/problems/977489194356715520\n#实现了AVLTree的put功能\n#没有实现delete功能\n\nclass Node:\n\tdef __init__(self,data):\n\t\tself.data = data\n\t\tself.left = None\n\t\tself.right = None\n\t\tself.height = 0\n\t\t\nclass AVLTree:\n\tdef __init__(self):\n\t\tself.root = None\n\t\t\n\tdef height(self,node):\n\t\tif node is None:\n\t\t\treturn -1\n\t\telse:\n\t\t\treturn node.height\n\t#破坏者在被破坏者的左子树的左子树上\t\n\tdef LLrotate(self,node):\n\t\ttemp = node.left\n\t\tnode.left = temp.right\n\t\ttemp.right = node\n\t\tnode.height = max(self.height(node.left),self.height(node.right))+1\n\t\ttemp.height = max(self.height(temp.left),self.height(node.right))+1\n\t\treturn temp\n\t\n\tdef RRrotate(self,node):\n\t\ttemp = node.right\n\t\tnode.right = temp.left\n\t\ttemp.left = node\n\t\tnode.height = max(self.height(node.left),self.height(node.right))+1\n\t\ttemp.height = max(self.height(node.left),self.height(node.right))+1\n\t\treturn temp\n\t#破坏者在被破坏者的右子树的左子树上\n\t#先做一次LL旋转再做一次RR旋转\n\tdef RLrotate(self,node):\n\t\tnode.right = self.LLrotate(node.right)\n\t\treturn self.RRrotate(node)\n\t\t\n\tdef LRrotare(self,node):\n\t\tnode.left = self.RRrotate(node.left)\n\t\treturn self.LLrotate(node)\n\t\t\n\tdef put(self,data):\n\t\tif not self.root:\n\t\t\tself.root = Node(data)\n\t\telse:\n\t\t\tself.root = self._put(data,self.root)\n\tdef _put(self,data,node):\n\t\tif node is None:\n\t\t\tnode = Node(data)\n\t\telif data < node.data:\n\t\t\tnode.left = self._put(data,node.left)\n\t\t\tif (self.height(node.left) - self.height(node.right)) == 2:\n\t\t\t\tif data < node.left.data:\n\t\t\t\t\tnode = self.LLrotate(node)\n\t\t\t\telse:\n\t\t\t\t\tnode = self.LRrotare(node)\n\t\telif data > node.data:\n\t\t\tnode.right = self._put(data,node.right)\n\t\t\tif (self.height(node.right) - self.height(node.left)) == 2:\n\t\t\t\tif data < node.right.data:\n\t\t\t\t\tnode = self.RLrotate(node)\n\t\t\t\telse:\n\t\t\t\t\tnode = self.RRrotate(node)\n\t\t\t\n\t\tnode.height = max(self.height(node.left),self.height(node.right)) + 1\n\t\treturn node\n\t\t\t\ndef main():\n\tN = input()\n\tli = input().split()\n\ttree = AVLTree()\n\tfor i in li:\n\t\ttree.put(int(i))\n\tprint(tree.root.data)\nmain()",
"class Node:\n\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n self.height = 0\n\n\nclass AVLTree:\n\n def __init__(self):\n self.root = None\n\n def height(self, node):\n if node is None:\n return -1\n else:\n return node.height\n\n def LLrotate(self, node):\n temp = node.left\n node.left = temp.right\n temp.right = node\n node.height = max(self.height(node.left), self.height(node.right)) + 1\n temp.height = max(self.height(temp.left), self.height(node.right)) + 1\n return temp\n\n def RRrotate(self, node):\n temp = node.right\n node.right = temp.left\n temp.left = node\n node.height = max(self.height(node.left), self.height(node.right)) + 1\n temp.height = max(self.height(node.left), self.height(node.right)) + 1\n return temp\n\n def RLrotate(self, node):\n node.right = self.LLrotate(node.right)\n return self.RRrotate(node)\n\n def LRrotare(self, node):\n node.left = self.RRrotate(node.left)\n return self.LLrotate(node)\n\n def put(self, data):\n if not self.root:\n self.root = Node(data)\n else:\n self.root = self._put(data, self.root)\n\n def _put(self, data, node):\n if node is None:\n node = Node(data)\n elif data < node.data:\n node.left = self._put(data, node.left)\n if self.height(node.left) - self.height(node.right) == 2:\n if data < node.left.data:\n node = self.LLrotate(node)\n else:\n node = self.LRrotare(node)\n elif data > node.data:\n node.right = self._put(data, node.right)\n if self.height(node.right) - self.height(node.left) == 2:\n if data < node.right.data:\n node = self.RLrotate(node)\n else:\n node = self.RRrotate(node)\n node.height = max(self.height(node.left), self.height(node.right)) + 1\n return node\n\n\ndef main():\n N = input()\n li = input().split()\n tree = AVLTree()\n for i in li:\n tree.put(int(i))\n print(tree.root.data)\n\n\nmain()\n",
"class Node:\n\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n self.height = 0\n\n\nclass AVLTree:\n\n def __init__(self):\n self.root = None\n\n def height(self, node):\n if node is None:\n return -1\n else:\n return node.height\n\n def LLrotate(self, node):\n temp = node.left\n node.left = temp.right\n temp.right = node\n node.height = max(self.height(node.left), self.height(node.right)) + 1\n temp.height = max(self.height(temp.left), self.height(node.right)) + 1\n return temp\n\n def RRrotate(self, node):\n temp = node.right\n node.right = temp.left\n temp.left = node\n node.height = max(self.height(node.left), self.height(node.right)) + 1\n temp.height = max(self.height(node.left), self.height(node.right)) + 1\n return temp\n\n def RLrotate(self, node):\n node.right = self.LLrotate(node.right)\n return self.RRrotate(node)\n\n def LRrotare(self, node):\n node.left = self.RRrotate(node.left)\n return self.LLrotate(node)\n\n def put(self, data):\n if not self.root:\n self.root = Node(data)\n else:\n self.root = self._put(data, self.root)\n\n def _put(self, data, node):\n if node is None:\n node = Node(data)\n elif data < node.data:\n node.left = self._put(data, node.left)\n if self.height(node.left) - self.height(node.right) == 2:\n if data < node.left.data:\n node = self.LLrotate(node)\n else:\n node = self.LRrotare(node)\n elif data > node.data:\n node.right = self._put(data, node.right)\n if self.height(node.right) - self.height(node.left) == 2:\n if data < node.right.data:\n node = self.RLrotate(node)\n else:\n node = self.RRrotate(node)\n node.height = max(self.height(node.left), self.height(node.right)) + 1\n return node\n\n\ndef main():\n N = input()\n li = input().split()\n tree = AVLTree()\n for i in li:\n tree.put(int(i))\n print(tree.root.data)\n\n\n<code token>\n",
"class Node:\n\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n self.height = 0\n\n\nclass AVLTree:\n\n def __init__(self):\n self.root = None\n\n def height(self, node):\n if node is None:\n return -1\n else:\n return node.height\n\n def LLrotate(self, node):\n temp = node.left\n node.left = temp.right\n temp.right = node\n node.height = max(self.height(node.left), self.height(node.right)) + 1\n temp.height = max(self.height(temp.left), self.height(node.right)) + 1\n return temp\n\n def RRrotate(self, node):\n temp = node.right\n node.right = temp.left\n temp.left = node\n node.height = max(self.height(node.left), self.height(node.right)) + 1\n temp.height = max(self.height(node.left), self.height(node.right)) + 1\n return temp\n\n def RLrotate(self, node):\n node.right = self.LLrotate(node.right)\n return self.RRrotate(node)\n\n def LRrotare(self, node):\n node.left = self.RRrotate(node.left)\n return self.LLrotate(node)\n\n def put(self, data):\n if not self.root:\n self.root = Node(data)\n else:\n self.root = self._put(data, self.root)\n\n def _put(self, data, node):\n if node is None:\n node = Node(data)\n elif data < node.data:\n node.left = self._put(data, node.left)\n if self.height(node.left) - self.height(node.right) == 2:\n if data < node.left.data:\n node = self.LLrotate(node)\n else:\n node = self.LRrotare(node)\n elif data > node.data:\n node.right = self._put(data, node.right)\n if self.height(node.right) - self.height(node.left) == 2:\n if data < node.right.data:\n node = self.RLrotate(node)\n else:\n node = self.RRrotate(node)\n node.height = max(self.height(node.left), self.height(node.right)) + 1\n return node\n\n\n<function token>\n<code token>\n",
"class Node:\n <function token>\n\n\nclass AVLTree:\n\n def __init__(self):\n self.root = None\n\n def height(self, node):\n if node is None:\n return -1\n else:\n return node.height\n\n def LLrotate(self, node):\n temp = node.left\n node.left = temp.right\n temp.right = node\n node.height = max(self.height(node.left), self.height(node.right)) + 1\n temp.height = max(self.height(temp.left), self.height(node.right)) + 1\n return temp\n\n def RRrotate(self, node):\n temp = node.right\n node.right = temp.left\n temp.left = node\n node.height = max(self.height(node.left), self.height(node.right)) + 1\n temp.height = max(self.height(node.left), self.height(node.right)) + 1\n return temp\n\n def RLrotate(self, node):\n node.right = self.LLrotate(node.right)\n return self.RRrotate(node)\n\n def LRrotare(self, node):\n node.left = self.RRrotate(node.left)\n return self.LLrotate(node)\n\n def put(self, data):\n if not self.root:\n self.root = Node(data)\n else:\n self.root = self._put(data, self.root)\n\n def _put(self, data, node):\n if node is None:\n node = Node(data)\n elif data < node.data:\n node.left = self._put(data, node.left)\n if self.height(node.left) - self.height(node.right) == 2:\n if data < node.left.data:\n node = self.LLrotate(node)\n else:\n node = self.LRrotare(node)\n elif data > node.data:\n node.right = self._put(data, node.right)\n if self.height(node.right) - self.height(node.left) == 2:\n if data < node.right.data:\n node = self.RLrotate(node)\n else:\n node = self.RRrotate(node)\n node.height = max(self.height(node.left), self.height(node.right)) + 1\n return node\n\n\n<function token>\n<code token>\n",
"<class token>\n\n\nclass AVLTree:\n\n def __init__(self):\n self.root = None\n\n def height(self, node):\n if node is None:\n return -1\n else:\n return node.height\n\n def LLrotate(self, node):\n temp = node.left\n node.left = temp.right\n temp.right = node\n node.height = max(self.height(node.left), self.height(node.right)) + 1\n temp.height = max(self.height(temp.left), self.height(node.right)) + 1\n return temp\n\n def RRrotate(self, node):\n temp = node.right\n node.right = temp.left\n temp.left = node\n node.height = max(self.height(node.left), self.height(node.right)) + 1\n temp.height = max(self.height(node.left), self.height(node.right)) + 1\n return temp\n\n def RLrotate(self, node):\n node.right = self.LLrotate(node.right)\n return self.RRrotate(node)\n\n def LRrotare(self, node):\n node.left = self.RRrotate(node.left)\n return self.LLrotate(node)\n\n def put(self, data):\n if not self.root:\n self.root = Node(data)\n else:\n self.root = self._put(data, self.root)\n\n def _put(self, data, node):\n if node is None:\n node = Node(data)\n elif data < node.data:\n node.left = self._put(data, node.left)\n if self.height(node.left) - self.height(node.right) == 2:\n if data < node.left.data:\n node = self.LLrotate(node)\n else:\n node = self.LRrotare(node)\n elif data > node.data:\n node.right = self._put(data, node.right)\n if self.height(node.right) - self.height(node.left) == 2:\n if data < node.right.data:\n node = self.RLrotate(node)\n else:\n node = self.RRrotate(node)\n node.height = max(self.height(node.left), self.height(node.right)) + 1\n return node\n\n\n<function token>\n<code token>\n",
"<class token>\n\n\nclass AVLTree:\n\n def __init__(self):\n self.root = None\n\n def height(self, node):\n if node is None:\n return -1\n else:\n return node.height\n <function token>\n\n def RRrotate(self, node):\n temp = node.right\n node.right = temp.left\n temp.left = node\n node.height = max(self.height(node.left), self.height(node.right)) + 1\n temp.height = max(self.height(node.left), self.height(node.right)) + 1\n return temp\n\n def RLrotate(self, node):\n node.right = self.LLrotate(node.right)\n return self.RRrotate(node)\n\n def LRrotare(self, node):\n node.left = self.RRrotate(node.left)\n return self.LLrotate(node)\n\n def put(self, data):\n if not self.root:\n self.root = Node(data)\n else:\n self.root = self._put(data, self.root)\n\n def _put(self, data, node):\n if node is None:\n node = Node(data)\n elif data < node.data:\n node.left = self._put(data, node.left)\n if self.height(node.left) - self.height(node.right) == 2:\n if data < node.left.data:\n node = self.LLrotate(node)\n else:\n node = self.LRrotare(node)\n elif data > node.data:\n node.right = self._put(data, node.right)\n if self.height(node.right) - self.height(node.left) == 2:\n if data < node.right.data:\n node = self.RLrotate(node)\n else:\n node = self.RRrotate(node)\n node.height = max(self.height(node.left), self.height(node.right)) + 1\n return node\n\n\n<function token>\n<code token>\n",
"<class token>\n\n\nclass AVLTree:\n\n def __init__(self):\n self.root = None\n\n def height(self, node):\n if node is None:\n return -1\n else:\n return node.height\n <function token>\n\n def RRrotate(self, node):\n temp = node.right\n node.right = temp.left\n temp.left = node\n node.height = max(self.height(node.left), self.height(node.right)) + 1\n temp.height = max(self.height(node.left), self.height(node.right)) + 1\n return temp\n <function token>\n\n def LRrotare(self, node):\n node.left = self.RRrotate(node.left)\n return self.LLrotate(node)\n\n def put(self, data):\n if not self.root:\n self.root = Node(data)\n else:\n self.root = self._put(data, self.root)\n\n def _put(self, data, node):\n if node is None:\n node = Node(data)\n elif data < node.data:\n node.left = self._put(data, node.left)\n if self.height(node.left) - self.height(node.right) == 2:\n if data < node.left.data:\n node = self.LLrotate(node)\n else:\n node = self.LRrotare(node)\n elif data > node.data:\n node.right = self._put(data, node.right)\n if self.height(node.right) - self.height(node.left) == 2:\n if data < node.right.data:\n node = self.RLrotate(node)\n else:\n node = self.RRrotate(node)\n node.height = max(self.height(node.left), self.height(node.right)) + 1\n return node\n\n\n<function token>\n<code token>\n",
"<class token>\n\n\nclass AVLTree:\n\n def __init__(self):\n self.root = None\n\n def height(self, node):\n if node is None:\n return -1\n else:\n return node.height\n <function token>\n <function token>\n <function token>\n\n def LRrotare(self, node):\n node.left = self.RRrotate(node.left)\n return self.LLrotate(node)\n\n def put(self, data):\n if not self.root:\n self.root = Node(data)\n else:\n self.root = self._put(data, self.root)\n\n def _put(self, data, node):\n if node is None:\n node = Node(data)\n elif data < node.data:\n node.left = self._put(data, node.left)\n if self.height(node.left) - self.height(node.right) == 2:\n if data < node.left.data:\n node = self.LLrotate(node)\n else:\n node = self.LRrotare(node)\n elif data > node.data:\n node.right = self._put(data, node.right)\n if self.height(node.right) - self.height(node.left) == 2:\n if data < node.right.data:\n node = self.RLrotate(node)\n else:\n node = self.RRrotate(node)\n node.height = max(self.height(node.left), self.height(node.right)) + 1\n return node\n\n\n<function token>\n<code token>\n",
"<class token>\n\n\nclass AVLTree:\n\n def __init__(self):\n self.root = None\n\n def height(self, node):\n if node is None:\n return -1\n else:\n return node.height\n <function token>\n <function token>\n <function token>\n\n def LRrotare(self, node):\n node.left = self.RRrotate(node.left)\n return self.LLrotate(node)\n\n def put(self, data):\n if not self.root:\n self.root = Node(data)\n else:\n self.root = self._put(data, self.root)\n <function token>\n\n\n<function token>\n<code token>\n",
"<class token>\n\n\nclass AVLTree:\n <function token>\n\n def height(self, node):\n if node is None:\n return -1\n else:\n return node.height\n <function token>\n <function token>\n <function token>\n\n def LRrotare(self, node):\n node.left = self.RRrotate(node.left)\n return self.LLrotate(node)\n\n def put(self, data):\n if not self.root:\n self.root = Node(data)\n else:\n self.root = self._put(data, self.root)\n <function token>\n\n\n<function token>\n<code token>\n",
"<class token>\n\n\nclass AVLTree:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def LRrotare(self, node):\n node.left = self.RRrotate(node.left)\n return self.LLrotate(node)\n\n def put(self, data):\n if not self.root:\n self.root = Node(data)\n else:\n self.root = self._put(data, self.root)\n <function token>\n\n\n<function token>\n<code token>\n",
"<class token>\n\n\nclass AVLTree:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def LRrotare(self, node):\n node.left = self.RRrotate(node.left)\n return self.LLrotate(node)\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n",
"<class token>\n\n\nclass AVLTree:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n",
"<class token>\n<class token>\n<function token>\n<code token>\n"
] | false |
98,658 |
9eaf3bd7a1779785f195d06b96613455a8a6416f
|
import sys
from math import log2
from typing import Union, Tuple
import hypothesis.strategies as st
from hypothesis import given, settings
class Node:
"""Node class for AVL tree. Implements many useful methods."""
def __init__(self, key: int):
# the node determines its subtree also
self.key: int = key
self.sum: int = self.key # sum of keys in subtree
self.height: int = 1 # height of subtree
self.size: int = 1 # size of subtree
self.balance_factor: int = 0
self.parent: Union[Node, None] = None
self.left: Union[Node, None] = None
self.right: Union[Node, None] = None
def __repr__(self):
return f"Node: key={self.key}, height={self.height}, balance_factor={self.balance_factor}, " \
f"size={self.size}, sum={self.sum})"
def __iter__(self):
"""In-order traversal of subtree."""
if self.left is not None:
yield from self.left
yield self
if self.right is not None:
yield from self.right
def __getitem__(self, item: int, base: int = 1) -> "Node":
"""Access element by index."""
item += base ^ 1
left_size = self.left.size if self.left is not None else 0
if 0 < item < left_size + 1:
return self.left.__getitem__(item)
elif item == left_size + 1:
return self
elif left_size + 1 < item <= self.size:
return self.right.__getitem__(item - left_size - 1)
else:
raise IndexError(f"Index out of range: {item - base ^ 1}")
def __erase_links(self):
self.parent = None
self.left = None
self.right = None
def __update_height(self):
left_height = self.left.height if self.left is not None else 0
right_height = self.right.height if self.right is not None else 0
self.height = 1 + max(left_height, right_height)
self.balance_factor = right_height - left_height
def __update_sum(self):
left_sum = self.left.sum if self.left is not None else 0
right_sum = self.right.sum if self.right is not None else 0
self.sum = self.key + left_sum + right_sum
def __update_size(self):
left_size = self.left.size if self.left is not None else 0
right_size = self.right.size if self.right is not None else 0
self.size = 1 + left_size + right_size
def __update_all(self):
self.__update_height()
self.__update_size()
self.__update_sum()
def __rotate_left(self) -> "Node":
"""Perform a left AVL rotation on the node. Returns new root of the subtree."""
lower = self.right
# add left subtree of lower to upper (self) as right child and update links
self.right = lower.left
if self.right is not None:
self.right.parent = self
self.__update_all()
# update lower and upper links
parent = self.parent
lower.left = self
self.parent = lower
lower.__update_all()
# update parent links
lower.parent = parent
if parent is not None and parent.left == self:
parent.left = lower
elif parent is not None and parent.right == self:
parent.right = lower
return lower # new root of subtree
def __rotate_right(self) -> "Node":
"""Perform a right AVL rotation on the node. Returns new root of the subtree."""
lower = self.left
# add right subtree of lower to upper (self) as left child and update links
self.left = lower.right
if self.left is not None:
self.left.parent = self
self.__update_all()
# update lower and upper links
parent = self.parent
lower.right = self
self.parent = lower
lower.__update_all()
# update parent links
lower.parent = parent
if parent is not None and parent.left == self:
parent.left = lower
elif parent is not None and parent.right == self:
parent.right = lower
return lower # new root of subtree
def __balance(self) -> "Node":
"""Balance tree starting from current node. Return root node of the whole tree."""
current = self
while True:
current.__update_height()
if current.balance_factor == 2: # right subtree is higher
middle = current.right
if middle.balance_factor < 0: # left subtree of middle node is higher
middle.__rotate_right()
current = current.__rotate_left()
elif current.balance_factor == -2: # left subtree is higher
middle = current.left
if middle.balance_factor > 0: # right subtree of middle node is higher
middle.__rotate_left()
current = current.__rotate_right()
else:
current.__update_size()
current.__update_sum()
if current.parent is None:
return current # return root
current = current.parent # go up if not root
def add_left(self, key: int) -> "Node":
"""Add left child, return new root. Raises an exception when such child exists."""
if self.left is None:
if key >= self.key:
raise ValueError("Key isn't less than parent's key")
self.left = Node(key=key)
self.left.parent = self
else:
raise ValueError("Left child exists")
return self.__balance()
def add_right(self, key: int) -> "Node":
"""Add right child, return new root. Raises an exception when such child exists."""
if self.right is None:
if key <= self.key:
raise ValueError("Key isn't bigger than parent's key")
self.right = Node(key=key)
self.right.parent = self
else:
raise ValueError("Right child exists")
return self.__balance()
def min(self) -> "Node":
"""Return element with minimal key in this subtree."""
current = self
while current.left is not None:
current = current.left
return current
def max(self) -> "Node":
"""Return element with maximal key in this subtree."""
current = self
while current.right is not None:
current = current.right
return current
def predecessor(self) -> Union["Node", None]:
"""Return element with previous key."""
if self.left is not None: # case 1: the node has a left child
return self.left.max()
else: # case 2: the node does not have a left child
current = self
while current.parent is not None: # traverse up
if current == current.parent.right:
return current.parent
else:
current = current.parent
return None # the root is reached, so no predecessor exists
def successor(self) -> Union["Node", None]:
"""Return element with next key."""
if self.right is not None: # case 1: the node has a right child
return self.right.min()
else: # case 2: the node does not have a right child
current = self
while current.parent is not None: # traverse up
if current == current.parent.left:
return current.parent
else:
current = current.parent
return None # the root is reached, so no successor exists
def find(self, key) -> Union["Node", None]:
"""Return element with such key if present, otherwise None."""
current = self
while key != current.key:
if key < current.key:
current = current.left # traverse left
elif key > current.key:
current = current.right # traverse right
if current is None: # failure
break
return current
def remove(self):
"""Remove element from the tree, update links of adjacent nodes."""
parent = self.parent
if self.left is None and self.right is None: # case 1: no children
if parent is None:
return None # remove root
elif parent.left is self:
parent.left = None # remove left leaf
elif parent.right is self:
parent.right = None # remove right leaf
elif self.left is not None and self.right is None: # case 2: left child
left = self.left
left.parent = parent
self.left = None
if parent is None:
return left # left is new root
elif parent.left is self:
parent.left = left # update left leaf
elif parent.right is self:
parent.right = left # update right leaf
elif self.left is None and self.right is not None: # case 3: right child
right = self.right
right.parent = parent
self.right = None
if parent is None:
return right # right is new root
elif parent.left is self:
parent.left = right # update left leaf
elif parent.right is self:
parent.right = right # update right leaf
else: # case 4: both child
successor = self.successor()
self.key = successor.key # exchange keys
return successor.remove() # remove successor leaf
self.parent = None # remove last link from the node
return parent.__balance()
def split_sums(self, key: int) -> Tuple[int, int]:
"""Compute sum of all keys less than this one,
sum of all keys greater than this one and then return them."""
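        # Illustrative example: for a tree holding {1, 3, 5, 7},
        # split_sums(5) returns (4, 7) -- 1 + 3 below the key, 7 above it.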
current = self
less, greater = 0, 0
while key != current.key:
if key < current.key:
# add all greater keys
greater += current.right.sum if current.right is not None else 0
greater += current.key
current = current.left # traverse left
elif key > current.key:
# add all smaller keys
less += current.left.sum if current.left is not None else 0
less += current.key
current = current.right # traverse right
if current is None: # key not found
break
if current is not None: # add from children
less += current.left.sum if current.left is not None else 0
greater += current.right.sum if current.right is not None else 0
return less, greater
class AVLTree:
def __init__(self, root=None):
self.root: Union[Node, None] = root
def __iter__(self):
"""In-order traversal of the tree."""
if self.empty():
return
for node in self.root:
yield node.key
def __getitem__(self, item: int) -> int:
"""Get key by index."""
return self.root[item].key
def empty(self) -> bool:
"""Check if the tree is empty."""
        return self.root is None
@property
def size(self) -> int:
"""Return the number of nodes in the tree."""
return self.root.size if not self.empty() else 0
@property
def sum(self) -> int:
"""Return sum of all keys of the tree."""
        return self.root.sum if not self.empty() else 0  # mirror size/height: 0 for an empty tree
@property
def height(self) -> int:
"""Return the height of the tree."""
return self.root.height if not self.empty() else 0
def clear(self):
"""Clear the tree."""
stack = [self.root]
while stack: # removing all links in loop
node = stack.pop()
if node is None:
continue
node.parent = None
if node.left is not None:
stack.append(node.left)
node.left = None
if node.right is not None:
stack.append(node.right)
node.right = None
self.root = None
def find(self, key: int) -> bool:
"""Return True if element with such key exists in the tree, otherwise False."""
if self.empty():
return False
return self.root.find(key) is not None
def insert(self, key: int) -> bool:
"""Insert key (create new element) in the tree
and return True on success or False on failure."""
if self.empty(): # empty tree, so value becomes the root
self.root = Node(key)
return True
current = self.root # start at the root
while current.key != key:
if key < current.key:
if current.left is None: # if no left child exists, insert element as left child
self.root = current.add_left(key=key)
return True
else: # if a left child does exist, traverse left
current = current.left
elif key > current.key:
if current.right is None: # if no right child exists, insert element as right child
self.root = current.add_right(key=key)
return True
else: # if a right child does exist, traverse right
current = current.right
return False # failure to insert
def remove(self, key: int) -> bool:
"""Remove element with such key if it exists in the tree (return True),
or return False otherwise."""
current = self.root.find(key) if not self.empty() else None
if current is None: # if no such key, failure
return False
self.root = current.remove() # update root
return True
def segment_sum(self, left, right):
"""Compute sum of all tree keys in segment [left, right]."""
if self.empty():
return 0
less, _ = self.root.split_sums(left)
_, greater = self.root.split_sums(right)
return self.sum - less - greater
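# Hedged usage sketch: shows how insert, iteration and segment_sum fit
# together; the helper name _demo_avl_usage is illustrative only and the
# function is never invoked by the module.
def _demo_avl_usage():
    t = AVLTree()
    for k in (5, 2, 8):
        t.insert(k)
    assert list(t) == [2, 5, 8]  # in-order traversal yields the sorted keys
    assert t.segment_sum(2, 8) == 15  # 2 + 5 + 8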
def f(x: int, s: int):
return (x + s) % 1000000001
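# Every query argument arrives masked by the previous range-sum answer: f()
# unmasks it as (x + last_sum) mod 1_000_000_001, matching how `ls` is
# threaded through the main loop below.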
if __name__ == '__main__':
n = int(sys.stdin.readline().strip())
tree = AVLTree()
ls = 0 # last sum
for _ in range(n):
op, *args = sys.stdin.readline().strip().split(" ")
if op == "+":
arg = f(int(args[0]), ls)
tree.insert(arg)
elif op == "-":
arg = f(int(args[0]), ls)
tree.remove(arg)
elif op == "?":
arg = f(int(args[0]), ls)
print("Found" if tree.find(arg) else "Not found")
else:
args = map(lambda x: f(int(x), ls), args)
ls = tree.segment_sum(*args)
print(ls)
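# The hypothesis property test below inserts a random unique sequence and
# checks that the in-order traversal stays sorted, the size matches, and the
# height respects the ~1.44 * log2(n) AVL bound, before exercising removals
# and order-statistic indexing.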
@given(st.lists(
st.integers(min_value=-10000, max_value=10000),
max_size=10000, unique=True))
@settings(max_examples=250)
def test_bst_properties(seq):
test_tree = AVLTree()
for x in seq:
test_tree.insert(x)
assert list(test_tree) == sorted(seq)
assert test_tree.size == len(seq)
assert test_tree.height <= 1.44 * log2(1 + len(seq))
if len(seq) >= 10:
assert test_tree.find(seq[5])
assert test_tree.find(seq[3])
assert test_tree.find(25) == (25 in seq)
assert test_tree.remove(seq.pop())
assert list(test_tree) == sorted(seq)
assert test_tree.remove(seq.pop())
assert list(test_tree) == sorted(seq)
assert test_tree.size == len(seq)
assert test_tree.find(seq[5])
assert test_tree.remove(seq.pop(len(seq) // 2))
assert list(test_tree) == sorted(seq)
assert test_tree[2] == sorted(seq)[1]
assert test_tree[test_tree.size] == sorted(seq)[len(seq)-1]
def test_operations():
test_tree = AVLTree()
# round one
s = 0
assert not test_tree.find(f(1, s)) # find 1
assert test_tree.insert(f(1, s)) # add 1
assert test_tree.find(f(1, s)) # find 1
assert test_tree.insert(f(2, s)) # add 2
s = test_tree.segment_sum(f(1, s), f(2, s)) # sum(1, 2)
assert s == 3
assert not test_tree.insert(f(1000000000, s)) # add 2
assert test_tree.find(f(1000000000, s)) # find 2
assert test_tree.remove(f(1000000000, s)) # remove 2
assert not test_tree.remove(f(1000000000, s)) # find 2
s = test_tree.segment_sum(f(999999999, s), f(1000000000, s)) # sum(1, 2)
assert s == 1
assert not test_tree.remove(f(2, s)) # remove 3
assert not test_tree.find(f(2, s)) # find 3
assert test_tree.remove(f(0, s)) # remove 1
assert test_tree.insert(f(9, s)) # add 10
s = test_tree.segment_sum(f(0, s), f(9, s)) # sum(1, 10)
assert s == 10
test_tree.clear()
# round two
s = 0
assert not test_tree.find(f(0, s))
assert test_tree.insert(f(0, s))
assert test_tree.find(f(0, s))
assert test_tree.remove(f(0, s))
assert not test_tree.find(f(0, s))
test_tree.clear()
# round three
s = 0
assert test_tree.insert(f(491572259, s))
assert test_tree.find(f(491572259, s))
assert not test_tree.find(f(899375874, s))
s = test_tree.segment_sum(f(310971296, s), f(877523306, s))
assert s == 491572259
assert test_tree.insert(f(352411209, s))
|
[
"import sys\nfrom math import log2\nfrom typing import Union, Tuple\n\nimport hypothesis.strategies as st\nfrom hypothesis import given, settings\n\n\nclass Node:\n \"\"\"Node class for AVL tree. Implements many useful methods.\"\"\"\n\n def __init__(self, key: int):\n # the node determines its subtree also\n self.key: int = key\n self.sum: int = self.key # sum of keys in subtree\n self.height: int = 1 # height of subtree\n self.size: int = 1 # size of subtree\n self.balance_factor: int = 0\n self.parent: Union[Node, None] = None\n self.left: Union[Node, None] = None\n self.right: Union[Node, None] = None\n\n def __repr__(self):\n return f\"Node: key={self.key}, height={self.height}, balance_factor={self.balance_factor}, \" \\\n f\"size={self.size}, sum={self.sum})\"\n\n def __iter__(self):\n \"\"\"In-order traversal of subtree.\"\"\"\n if self.left is not None:\n yield from self.left\n yield self\n if self.right is not None:\n yield from self.right\n\n def __getitem__(self, item: int, base: int = 1) -> \"Node\":\n \"\"\"Access element by index.\"\"\"\n item += base ^ 1\n left_size = self.left.size if self.left is not None else 0\n if 0 < item < left_size + 1:\n return self.left.__getitem__(item)\n elif item == left_size + 1:\n return self\n elif left_size + 1 < item <= self.size:\n return self.right.__getitem__(item - left_size - 1)\n else:\n raise IndexError(f\"Index out of range: {item - base ^ 1}\")\n\n def __erase_links(self):\n self.parent = None\n self.left = None\n self.right = None\n\n def __update_height(self):\n left_height = self.left.height if self.left is not None else 0\n right_height = self.right.height if self.right is not None else 0\n self.height = 1 + max(left_height, right_height)\n self.balance_factor = right_height - left_height\n\n def __update_sum(self):\n left_sum = self.left.sum if self.left is not None else 0\n right_sum = self.right.sum if self.right is not None else 0\n self.sum = self.key + left_sum + right_sum\n\n def __update_size(self):\n left_size = self.left.size if self.left is not None else 0\n right_size = self.right.size if self.right is not None else 0\n self.size = 1 + left_size + right_size\n\n def __update_all(self):\n self.__update_height()\n self.__update_size()\n self.__update_sum()\n\n def __rotate_left(self) -> \"Node\":\n \"\"\"Perform a left AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.right\n # add left subtree of lower to upper (self) as right child and update links\n self.right = lower.left\n if self.right is not None:\n self.right.parent = self\n self.__update_all()\n # update lower and upper links\n parent = self.parent\n lower.left = self\n self.parent = lower\n lower.__update_all()\n # update parent links\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower # new root of subtree\n\n def __rotate_right(self) -> \"Node\":\n \"\"\"Perform a right AVL rotation on the node. 
Returns new root of the subtree.\"\"\"\n lower = self.left\n # add right subtree of lower to upper (self) as left child and update links\n self.left = lower.right\n if self.left is not None:\n self.left.parent = self\n self.__update_all()\n # update lower and upper links\n parent = self.parent\n lower.right = self\n self.parent = lower\n lower.__update_all()\n # update parent links\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower # new root of subtree\n\n def __balance(self) -> \"Node\":\n \"\"\"Balance tree starting from current node. Return root node of the whole tree.\"\"\"\n current = self\n\n while True:\n current.__update_height()\n\n if current.balance_factor == 2: # right subtree is higher\n middle = current.right\n if middle.balance_factor < 0: # left subtree of middle node is higher\n middle.__rotate_right()\n current = current.__rotate_left()\n\n elif current.balance_factor == -2: # left subtree is higher\n middle = current.left\n if middle.balance_factor > 0: # right subtree of middle node is higher\n middle.__rotate_left()\n current = current.__rotate_right()\n\n else:\n current.__update_size()\n current.__update_sum()\n\n if current.parent is None:\n return current # return root\n\n current = current.parent # go up if not root\n\n def add_left(self, key: int) -> \"Node\":\n \"\"\"Add left child, return new root. Raises an exception when such child exists.\"\"\"\n if self.left is None:\n if key >= self.key:\n raise ValueError(\"Key isn't less than parent's key\")\n self.left = Node(key=key)\n self.left.parent = self\n else:\n raise ValueError(\"Left child exists\")\n return self.__balance()\n\n def add_right(self, key: int) -> \"Node\":\n \"\"\"Add right child, return new root. 
Raises an exception when such child exists.\"\"\"\n if self.right is None:\n if key <= self.key:\n raise ValueError(\"Key isn't bigger than parent's key\")\n self.right = Node(key=key)\n self.right.parent = self\n else:\n raise ValueError(\"Right child exists\")\n return self.__balance()\n\n def min(self) -> \"Node\":\n \"\"\"Return element with minimal key in this subtree.\"\"\"\n current = self\n while current.left is not None:\n current = current.left\n return current\n\n def max(self) -> \"Node\":\n \"\"\"Return element with maximal key in this subtree.\"\"\"\n current = self\n while current.right is not None:\n current = current.right\n return current\n\n def predecessor(self) -> Union[\"Node\", None]:\n \"\"\"Return element with previous key.\"\"\"\n if self.left is not None: # case 1: the node has a left child\n return self.left.max()\n\n else: # case 2: the node does not have a left child\n current = self\n while current.parent is not None: # traverse up\n if current == current.parent.right:\n return current.parent\n else:\n current = current.parent\n\n return None # the root is reached, so no predecessor exists\n\n def successor(self) -> Union[\"Node\", None]:\n \"\"\"Return element with next key.\"\"\"\n if self.right is not None: # case 1: the node has a right child\n return self.right.min()\n\n else: # case 2: the node does not have a right child\n current = self\n while current.parent is not None: # traverse up\n if current == current.parent.left:\n return current.parent\n else:\n current = current.parent\n\n return None # the root is reached, so no successor exists\n\n def find(self, key) -> Union[\"Node\", None]:\n \"\"\"Return element with such key if present, otherwise None.\"\"\"\n current = self\n while key != current.key:\n if key < current.key:\n current = current.left # traverse left\n\n elif key > current.key:\n current = current.right # traverse right\n\n if current is None: # failure\n break\n return current\n\n def remove(self):\n \"\"\"Remove element from the tree, update links of adjacent nodes.\"\"\"\n parent = self.parent\n if self.left is None and self.right is None: # case 1: no children\n\n if parent is None:\n return None # remove root\n elif parent.left is self:\n parent.left = None # remove left leaf\n elif parent.right is self:\n parent.right = None # remove right leaf\n\n elif self.left is not None and self.right is None: # case 2: left child\n left = self.left\n left.parent = parent\n self.left = None\n\n if parent is None:\n return left # left is new root\n elif parent.left is self:\n parent.left = left # update left leaf\n elif parent.right is self:\n parent.right = left # update right leaf\n\n elif self.left is None and self.right is not None: # case 3: right child\n right = self.right\n right.parent = parent\n self.right = None\n\n if parent is None:\n return right # right is new root\n elif parent.left is self:\n parent.left = right # update left leaf\n elif parent.right is self:\n parent.right = right # update right leaf\n\n else: # case 4: both child\n successor = self.successor()\n self.key = successor.key # exchange keys\n return successor.remove() # remove successor leaf\n\n self.parent = None # remove last link from the node\n return parent.__balance()\n\n def split_sums(self, key: int) -> Tuple[int, int]:\n \"\"\"Compute sum of all keys less than this one,\n sum of all keys greater than this one and then return them.\"\"\"\n current = self\n less, greater = 0, 0\n while key != current.key:\n\n if key < current.key:\n # add all greater 
keys\n greater += current.right.sum if current.right is not None else 0\n greater += current.key\n\n current = current.left # traverse left\n\n elif key > current.key:\n # add all smaller keys\n less += current.left.sum if current.left is not None else 0\n less += current.key\n\n current = current.right # traverse right\n\n if current is None: # key not found\n break\n\n if current is not None: # add from children\n less += current.left.sum if current.left is not None else 0\n greater += current.right.sum if current.right is not None else 0\n\n return less, greater\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) -> int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def empty(self) -> bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n\n @property\n def size(self) -> int:\n \"\"\"Return the number of nodes in the tree.\"\"\"\n return self.root.size if not self.empty() else 0\n\n @property\n def sum(self) -> int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n\n @property\n def height(self) -> int:\n \"\"\"Return the height of the tree.\"\"\"\n return self.root.height if not self.empty() else 0\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n\n while stack: # removing all links in loop\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n\n self.root = None\n\n def find(self, key: int) -> bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) -> bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty(): # empty tree, so value becomes the root\n self.root = Node(key)\n return True\n\n current = self.root # start at the root\n while current.key != key:\n\n if key < current.key:\n\n if current.left is None: # if no left child exists, insert element as left child\n self.root = current.add_left(key=key)\n return True\n\n else: # if a left child does exist, traverse left\n current = current.left\n\n elif key > current.key:\n\n if current.right is None: # if no right child exists, insert element as right child\n self.root = current.add_right(key=key)\n return True\n\n else: # if a right child does exist, traverse right\n current = current.right\n\n return False # failure to insert\n\n def remove(self, key: int) -> bool:\n \"\"\"Remove element with such key if it exists in the tree (return True),\n or return False otherwise.\"\"\"\n current = self.root.find(key) if not self.empty() else None\n if current is None: # if no such key, failure\n return False\n\n self.root = current.remove() # update root\n return True\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\ndef f(x: int, s: int):\n return (x + s) % 1000000001\n\n\nif __name__ == '__main__':\n n = 
int(sys.stdin.readline().strip())\n tree = AVLTree()\n ls = 0 # last sum\n for _ in range(n):\n op, *args = sys.stdin.readline().strip().split(\" \")\n if op == \"+\":\n arg = f(int(args[0]), ls)\n tree.insert(arg)\n elif op == \"-\":\n arg = f(int(args[0]), ls)\n tree.remove(arg)\n elif op == \"?\":\n arg = f(int(args[0]), ls)\n print(\"Found\" if tree.find(arg) else \"Not found\")\n else:\n args = map(lambda x: f(int(x), ls), args)\n ls = tree.segment_sum(*args)\n print(ls)\n\n\n@given(st.lists(\n st.integers(min_value=-10000, max_value=10000),\n max_size=10000, unique=True))\n@settings(max_examples=250)\ndef test_bst_properties(seq):\n test_tree = AVLTree()\n for x in seq:\n test_tree.insert(x)\n assert list(test_tree) == sorted(seq)\n assert test_tree.size == len(seq)\n assert test_tree.height <= 1.44 * log2(1 + len(seq))\n if len(seq) >= 10:\n assert test_tree.find(seq[5])\n assert test_tree.find(seq[3])\n assert test_tree.find(25) == (25 in seq)\n assert test_tree.remove(seq.pop())\n assert list(test_tree) == sorted(seq)\n assert test_tree.remove(seq.pop())\n assert list(test_tree) == sorted(seq)\n assert test_tree.size == len(seq)\n assert test_tree.find(seq[5])\n assert test_tree.remove(seq.pop(len(seq) // 2))\n assert list(test_tree) == sorted(seq)\n assert test_tree[2] == sorted(seq)[1]\n assert test_tree[test_tree.size] == sorted(seq)[len(seq)-1]\n\n\ndef test_operations():\n test_tree = AVLTree()\n # round one\n s = 0\n assert not test_tree.find(f(1, s)) # find 1\n assert test_tree.insert(f(1, s)) # add 1\n assert test_tree.find(f(1, s)) # find 1\n assert test_tree.insert(f(2, s)) # add 2\n s = test_tree.segment_sum(f(1, s), f(2, s)) # sum(1, 2)\n assert s == 3\n assert not test_tree.insert(f(1000000000, s)) # add 2\n assert test_tree.find(f(1000000000, s)) # find 2\n assert test_tree.remove(f(1000000000, s)) # remove 2\n assert not test_tree.remove(f(1000000000, s)) # find 2\n s = test_tree.segment_sum(f(999999999, s), f(1000000000, s)) # sum(1, 2)\n assert s == 1\n assert not test_tree.remove(f(2, s)) # remove 3\n assert not test_tree.find(f(2, s)) # find 3\n assert test_tree.remove(f(0, s)) # remove 1\n assert test_tree.insert(f(9, s)) # add 10\n s = test_tree.segment_sum(f(0, s), f(9, s)) # sum(1, 10)\n assert s == 10\n test_tree.clear()\n # round two\n s = 0\n assert not test_tree.find(f(0, s))\n assert test_tree.insert(f(0, s))\n assert test_tree.find(f(0, s))\n assert test_tree.remove(f(0, s))\n assert not test_tree.find(f(0, s))\n test_tree.clear()\n # round three\n s = 0\n assert test_tree.insert(f(491572259, s))\n assert test_tree.find(f(491572259, s))\n assert not test_tree.find(f(899375874, s))\n s = test_tree.segment_sum(f(310971296, s), f(877523306, s))\n assert s == 491572259\n assert test_tree.insert(f(352411209, s))\n",
"import sys\nfrom math import log2\nfrom typing import Union, Tuple\nimport hypothesis.strategies as st\nfrom hypothesis import given, settings\n\n\nclass Node:\n \"\"\"Node class for AVL tree. Implements many useful methods.\"\"\"\n\n def __init__(self, key: int):\n self.key: int = key\n self.sum: int = self.key\n self.height: int = 1\n self.size: int = 1\n self.balance_factor: int = 0\n self.parent: Union[Node, None] = None\n self.left: Union[Node, None] = None\n self.right: Union[Node, None] = None\n\n def __repr__(self):\n return (\n f'Node: key={self.key}, height={self.height}, balance_factor={self.balance_factor}, size={self.size}, sum={self.sum})'\n )\n\n def __iter__(self):\n \"\"\"In-order traversal of subtree.\"\"\"\n if self.left is not None:\n yield from self.left\n yield self\n if self.right is not None:\n yield from self.right\n\n def __getitem__(self, item: int, base: int=1) ->'Node':\n \"\"\"Access element by index.\"\"\"\n item += base ^ 1\n left_size = self.left.size if self.left is not None else 0\n if 0 < item < left_size + 1:\n return self.left.__getitem__(item)\n elif item == left_size + 1:\n return self\n elif left_size + 1 < item <= self.size:\n return self.right.__getitem__(item - left_size - 1)\n else:\n raise IndexError(f'Index out of range: {item - base ^ 1}')\n\n def __erase_links(self):\n self.parent = None\n self.left = None\n self.right = None\n\n def __update_height(self):\n left_height = self.left.height if self.left is not None else 0\n right_height = self.right.height if self.right is not None else 0\n self.height = 1 + max(left_height, right_height)\n self.balance_factor = right_height - left_height\n\n def __update_sum(self):\n left_sum = self.left.sum if self.left is not None else 0\n right_sum = self.right.sum if self.right is not None else 0\n self.sum = self.key + left_sum + right_sum\n\n def __update_size(self):\n left_size = self.left.size if self.left is not None else 0\n right_size = self.right.size if self.right is not None else 0\n self.size = 1 + left_size + right_size\n\n def __update_all(self):\n self.__update_height()\n self.__update_size()\n self.__update_sum()\n\n def __rotate_left(self) ->'Node':\n \"\"\"Perform a left AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.right\n self.right = lower.left\n if self.right is not None:\n self.right.parent = self\n self.__update_all()\n parent = self.parent\n lower.left = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __rotate_right(self) ->'Node':\n \"\"\"Perform a right AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.left\n self.left = lower.right\n if self.left is not None:\n self.left.parent = self\n self.__update_all()\n parent = self.parent\n lower.right = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __balance(self) ->'Node':\n \"\"\"Balance tree starting from current node. 
Return root node of the whole tree.\"\"\"\n current = self\n while True:\n current.__update_height()\n if current.balance_factor == 2:\n middle = current.right\n if middle.balance_factor < 0:\n middle.__rotate_right()\n current = current.__rotate_left()\n elif current.balance_factor == -2:\n middle = current.left\n if middle.balance_factor > 0:\n middle.__rotate_left()\n current = current.__rotate_right()\n else:\n current.__update_size()\n current.__update_sum()\n if current.parent is None:\n return current\n current = current.parent\n\n def add_left(self, key: int) ->'Node':\n \"\"\"Add left child, return new root. Raises an exception when such child exists.\"\"\"\n if self.left is None:\n if key >= self.key:\n raise ValueError(\"Key isn't less than parent's key\")\n self.left = Node(key=key)\n self.left.parent = self\n else:\n raise ValueError('Left child exists')\n return self.__balance()\n\n def add_right(self, key: int) ->'Node':\n \"\"\"Add right child, return new root. Raises an exception when such child exists.\"\"\"\n if self.right is None:\n if key <= self.key:\n raise ValueError(\"Key isn't bigger than parent's key\")\n self.right = Node(key=key)\n self.right.parent = self\n else:\n raise ValueError('Right child exists')\n return self.__balance()\n\n def min(self) ->'Node':\n \"\"\"Return element with minimal key in this subtree.\"\"\"\n current = self\n while current.left is not None:\n current = current.left\n return current\n\n def max(self) ->'Node':\n \"\"\"Return element with maximal key in this subtree.\"\"\"\n current = self\n while current.right is not None:\n current = current.right\n return current\n\n def predecessor(self) ->Union['Node', None]:\n \"\"\"Return element with previous key.\"\"\"\n if self.left is not None:\n return self.left.max()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.right:\n return current.parent\n else:\n current = current.parent\n return None\n\n def successor(self) ->Union['Node', None]:\n \"\"\"Return element with next key.\"\"\"\n if self.right is not None:\n return self.right.min()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.left:\n return current.parent\n else:\n current = current.parent\n return None\n\n def find(self, key) ->Union['Node', None]:\n \"\"\"Return element with such key if present, otherwise None.\"\"\"\n current = self\n while key != current.key:\n if key < current.key:\n current = current.left\n elif key > current.key:\n current = current.right\n if current is None:\n break\n return current\n\n def remove(self):\n \"\"\"Remove element from the tree, update links of adjacent nodes.\"\"\"\n parent = self.parent\n if self.left is None and self.right is None:\n if parent is None:\n return None\n elif parent.left is self:\n parent.left = None\n elif parent.right is self:\n parent.right = None\n elif self.left is not None and self.right is None:\n left = self.left\n left.parent = parent\n self.left = None\n if parent is None:\n return left\n elif parent.left is self:\n parent.left = left\n elif parent.right is self:\n parent.right = left\n elif self.left is None and self.right is not None:\n right = self.right\n right.parent = parent\n self.right = None\n if parent is None:\n return right\n elif parent.left is self:\n parent.left = right\n elif parent.right is self:\n parent.right = right\n else:\n successor = self.successor()\n self.key = successor.key\n return successor.remove()\n self.parent = None\n return parent.__balance()\n\n 
def split_sums(self, key: int) ->Tuple[int, int]:\n \"\"\"Compute sum of all keys less than this one,\n sum of all keys greater than this one and then return them.\"\"\"\n current = self\n less, greater = 0, 0\n while key != current.key:\n if key < current.key:\n greater += (current.right.sum if current.right is not None else\n 0)\n greater += current.key\n current = current.left\n elif key > current.key:\n less += current.left.sum if current.left is not None else 0\n less += current.key\n current = current.right\n if current is None:\n break\n if current is not None:\n less += current.left.sum if current.left is not None else 0\n greater += current.right.sum if current.right is not None else 0\n return less, greater\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) ->int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n\n @property\n def size(self) ->int:\n \"\"\"Return the number of nodes in the tree.\"\"\"\n return self.root.size if not self.empty() else 0\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n\n @property\n def height(self) ->int:\n \"\"\"Return the height of the tree.\"\"\"\n return self.root.height if not self.empty() else 0\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n\n def remove(self, key: int) ->bool:\n \"\"\"Remove element with such key if it exists in the tree (return True),\n or return False otherwise.\"\"\"\n current = self.root.find(key) if not self.empty() else None\n if current is None:\n return False\n self.root = current.remove()\n return True\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\ndef f(x: int, s: int):\n return (x + s) % 1000000001\n\n\nif __name__ == '__main__':\n n = int(sys.stdin.readline().strip())\n tree = AVLTree()\n ls = 0\n for _ in range(n):\n op, *args = sys.stdin.readline().strip().split(' ')\n if op == '+':\n arg = f(int(args[0]), ls)\n tree.insert(arg)\n elif op == '-':\n arg = f(int(args[0]), ls)\n 
tree.remove(arg)\n elif op == '?':\n arg = f(int(args[0]), ls)\n print('Found' if tree.find(arg) else 'Not found')\n else:\n args = map(lambda x: f(int(x), ls), args)\n ls = tree.segment_sum(*args)\n print(ls)\n\n\n@given(st.lists(st.integers(min_value=-10000, max_value=10000), max_size=\n 10000, unique=True))\n@settings(max_examples=250)\ndef test_bst_properties(seq):\n test_tree = AVLTree()\n for x in seq:\n test_tree.insert(x)\n assert list(test_tree) == sorted(seq)\n assert test_tree.size == len(seq)\n assert test_tree.height <= 1.44 * log2(1 + len(seq))\n if len(seq) >= 10:\n assert test_tree.find(seq[5])\n assert test_tree.find(seq[3])\n assert test_tree.find(25) == (25 in seq)\n assert test_tree.remove(seq.pop())\n assert list(test_tree) == sorted(seq)\n assert test_tree.remove(seq.pop())\n assert list(test_tree) == sorted(seq)\n assert test_tree.size == len(seq)\n assert test_tree.find(seq[5])\n assert test_tree.remove(seq.pop(len(seq) // 2))\n assert list(test_tree) == sorted(seq)\n assert test_tree[2] == sorted(seq)[1]\n assert test_tree[test_tree.size] == sorted(seq)[len(seq) - 1]\n\n\ndef test_operations():\n test_tree = AVLTree()\n s = 0\n assert not test_tree.find(f(1, s))\n assert test_tree.insert(f(1, s))\n assert test_tree.find(f(1, s))\n assert test_tree.insert(f(2, s))\n s = test_tree.segment_sum(f(1, s), f(2, s))\n assert s == 3\n assert not test_tree.insert(f(1000000000, s))\n assert test_tree.find(f(1000000000, s))\n assert test_tree.remove(f(1000000000, s))\n assert not test_tree.remove(f(1000000000, s))\n s = test_tree.segment_sum(f(999999999, s), f(1000000000, s))\n assert s == 1\n assert not test_tree.remove(f(2, s))\n assert not test_tree.find(f(2, s))\n assert test_tree.remove(f(0, s))\n assert test_tree.insert(f(9, s))\n s = test_tree.segment_sum(f(0, s), f(9, s))\n assert s == 10\n test_tree.clear()\n s = 0\n assert not test_tree.find(f(0, s))\n assert test_tree.insert(f(0, s))\n assert test_tree.find(f(0, s))\n assert test_tree.remove(f(0, s))\n assert not test_tree.find(f(0, s))\n test_tree.clear()\n s = 0\n assert test_tree.insert(f(491572259, s))\n assert test_tree.find(f(491572259, s))\n assert not test_tree.find(f(899375874, s))\n s = test_tree.segment_sum(f(310971296, s), f(877523306, s))\n assert s == 491572259\n assert test_tree.insert(f(352411209, s))\n",
"<import token>\n\n\nclass Node:\n \"\"\"Node class for AVL tree. Implements many useful methods.\"\"\"\n\n def __init__(self, key: int):\n self.key: int = key\n self.sum: int = self.key\n self.height: int = 1\n self.size: int = 1\n self.balance_factor: int = 0\n self.parent: Union[Node, None] = None\n self.left: Union[Node, None] = None\n self.right: Union[Node, None] = None\n\n def __repr__(self):\n return (\n f'Node: key={self.key}, height={self.height}, balance_factor={self.balance_factor}, size={self.size}, sum={self.sum})'\n )\n\n def __iter__(self):\n \"\"\"In-order traversal of subtree.\"\"\"\n if self.left is not None:\n yield from self.left\n yield self\n if self.right is not None:\n yield from self.right\n\n def __getitem__(self, item: int, base: int=1) ->'Node':\n \"\"\"Access element by index.\"\"\"\n item += base ^ 1\n left_size = self.left.size if self.left is not None else 0\n if 0 < item < left_size + 1:\n return self.left.__getitem__(item)\n elif item == left_size + 1:\n return self\n elif left_size + 1 < item <= self.size:\n return self.right.__getitem__(item - left_size - 1)\n else:\n raise IndexError(f'Index out of range: {item - base ^ 1}')\n\n def __erase_links(self):\n self.parent = None\n self.left = None\n self.right = None\n\n def __update_height(self):\n left_height = self.left.height if self.left is not None else 0\n right_height = self.right.height if self.right is not None else 0\n self.height = 1 + max(left_height, right_height)\n self.balance_factor = right_height - left_height\n\n def __update_sum(self):\n left_sum = self.left.sum if self.left is not None else 0\n right_sum = self.right.sum if self.right is not None else 0\n self.sum = self.key + left_sum + right_sum\n\n def __update_size(self):\n left_size = self.left.size if self.left is not None else 0\n right_size = self.right.size if self.right is not None else 0\n self.size = 1 + left_size + right_size\n\n def __update_all(self):\n self.__update_height()\n self.__update_size()\n self.__update_sum()\n\n def __rotate_left(self) ->'Node':\n \"\"\"Perform a left AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.right\n self.right = lower.left\n if self.right is not None:\n self.right.parent = self\n self.__update_all()\n parent = self.parent\n lower.left = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __rotate_right(self) ->'Node':\n \"\"\"Perform a right AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.left\n self.left = lower.right\n if self.left is not None:\n self.left.parent = self\n self.__update_all()\n parent = self.parent\n lower.right = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __balance(self) ->'Node':\n \"\"\"Balance tree starting from current node. 
Return root node of the whole tree.\"\"\"\n current = self\n while True:\n current.__update_height()\n if current.balance_factor == 2:\n middle = current.right\n if middle.balance_factor < 0:\n middle.__rotate_right()\n current = current.__rotate_left()\n elif current.balance_factor == -2:\n middle = current.left\n if middle.balance_factor > 0:\n middle.__rotate_left()\n current = current.__rotate_right()\n else:\n current.__update_size()\n current.__update_sum()\n if current.parent is None:\n return current\n current = current.parent\n\n def add_left(self, key: int) ->'Node':\n \"\"\"Add left child, return new root. Raises an exception when such child exists.\"\"\"\n if self.left is None:\n if key >= self.key:\n raise ValueError(\"Key isn't less than parent's key\")\n self.left = Node(key=key)\n self.left.parent = self\n else:\n raise ValueError('Left child exists')\n return self.__balance()\n\n def add_right(self, key: int) ->'Node':\n \"\"\"Add right child, return new root. Raises an exception when such child exists.\"\"\"\n if self.right is None:\n if key <= self.key:\n raise ValueError(\"Key isn't bigger than parent's key\")\n self.right = Node(key=key)\n self.right.parent = self\n else:\n raise ValueError('Right child exists')\n return self.__balance()\n\n def min(self) ->'Node':\n \"\"\"Return element with minimal key in this subtree.\"\"\"\n current = self\n while current.left is not None:\n current = current.left\n return current\n\n def max(self) ->'Node':\n \"\"\"Return element with maximal key in this subtree.\"\"\"\n current = self\n while current.right is not None:\n current = current.right\n return current\n\n def predecessor(self) ->Union['Node', None]:\n \"\"\"Return element with previous key.\"\"\"\n if self.left is not None:\n return self.left.max()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.right:\n return current.parent\n else:\n current = current.parent\n return None\n\n def successor(self) ->Union['Node', None]:\n \"\"\"Return element with next key.\"\"\"\n if self.right is not None:\n return self.right.min()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.left:\n return current.parent\n else:\n current = current.parent\n return None\n\n def find(self, key) ->Union['Node', None]:\n \"\"\"Return element with such key if present, otherwise None.\"\"\"\n current = self\n while key != current.key:\n if key < current.key:\n current = current.left\n elif key > current.key:\n current = current.right\n if current is None:\n break\n return current\n\n def remove(self):\n \"\"\"Remove element from the tree, update links of adjacent nodes.\"\"\"\n parent = self.parent\n if self.left is None and self.right is None:\n if parent is None:\n return None\n elif parent.left is self:\n parent.left = None\n elif parent.right is self:\n parent.right = None\n elif self.left is not None and self.right is None:\n left = self.left\n left.parent = parent\n self.left = None\n if parent is None:\n return left\n elif parent.left is self:\n parent.left = left\n elif parent.right is self:\n parent.right = left\n elif self.left is None and self.right is not None:\n right = self.right\n right.parent = parent\n self.right = None\n if parent is None:\n return right\n elif parent.left is self:\n parent.left = right\n elif parent.right is self:\n parent.right = right\n else:\n successor = self.successor()\n self.key = successor.key\n return successor.remove()\n self.parent = None\n return parent.__balance()\n\n 
def split_sums(self, key: int) ->Tuple[int, int]:\n \"\"\"Compute sum of all keys less than this one,\n sum of all keys greater than this one and then return them.\"\"\"\n current = self\n less, greater = 0, 0\n while key != current.key:\n if key < current.key:\n greater += (current.right.sum if current.right is not None else\n 0)\n greater += current.key\n current = current.left\n elif key > current.key:\n less += current.left.sum if current.left is not None else 0\n less += current.key\n current = current.right\n if current is None:\n break\n if current is not None:\n less += current.left.sum if current.left is not None else 0\n greater += current.right.sum if current.right is not None else 0\n return less, greater\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) ->int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n\n @property\n def size(self) ->int:\n \"\"\"Return the number of nodes in the tree.\"\"\"\n return self.root.size if not self.empty() else 0\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n\n @property\n def height(self) ->int:\n \"\"\"Return the height of the tree.\"\"\"\n return self.root.height if not self.empty() else 0\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n\n def remove(self, key: int) ->bool:\n \"\"\"Remove element with such key if it exists in the tree (return True),\n or return False otherwise.\"\"\"\n current = self.root.find(key) if not self.empty() else None\n if current is None:\n return False\n self.root = current.remove()\n return True\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\ndef f(x: int, s: int):\n return (x + s) % 1000000001\n\n\nif __name__ == '__main__':\n n = int(sys.stdin.readline().strip())\n tree = AVLTree()\n ls = 0\n for _ in range(n):\n op, *args = sys.stdin.readline().strip().split(' ')\n if op == '+':\n arg = f(int(args[0]), ls)\n tree.insert(arg)\n elif op == '-':\n arg = f(int(args[0]), ls)\n 
tree.remove(arg)\n elif op == '?':\n arg = f(int(args[0]), ls)\n print('Found' if tree.find(arg) else 'Not found')\n else:\n args = map(lambda x: f(int(x), ls), args)\n ls = tree.segment_sum(*args)\n print(ls)\n\n\n@given(st.lists(st.integers(min_value=-10000, max_value=10000), max_size=\n 10000, unique=True))\n@settings(max_examples=250)\ndef test_bst_properties(seq):\n test_tree = AVLTree()\n for x in seq:\n test_tree.insert(x)\n assert list(test_tree) == sorted(seq)\n assert test_tree.size == len(seq)\n assert test_tree.height <= 1.44 * log2(1 + len(seq))\n if len(seq) >= 10:\n assert test_tree.find(seq[5])\n assert test_tree.find(seq[3])\n assert test_tree.find(25) == (25 in seq)\n assert test_tree.remove(seq.pop())\n assert list(test_tree) == sorted(seq)\n assert test_tree.remove(seq.pop())\n assert list(test_tree) == sorted(seq)\n assert test_tree.size == len(seq)\n assert test_tree.find(seq[5])\n assert test_tree.remove(seq.pop(len(seq) // 2))\n assert list(test_tree) == sorted(seq)\n assert test_tree[2] == sorted(seq)[1]\n assert test_tree[test_tree.size] == sorted(seq)[len(seq) - 1]\n\n\ndef test_operations():\n test_tree = AVLTree()\n s = 0\n assert not test_tree.find(f(1, s))\n assert test_tree.insert(f(1, s))\n assert test_tree.find(f(1, s))\n assert test_tree.insert(f(2, s))\n s = test_tree.segment_sum(f(1, s), f(2, s))\n assert s == 3\n assert not test_tree.insert(f(1000000000, s))\n assert test_tree.find(f(1000000000, s))\n assert test_tree.remove(f(1000000000, s))\n assert not test_tree.remove(f(1000000000, s))\n s = test_tree.segment_sum(f(999999999, s), f(1000000000, s))\n assert s == 1\n assert not test_tree.remove(f(2, s))\n assert not test_tree.find(f(2, s))\n assert test_tree.remove(f(0, s))\n assert test_tree.insert(f(9, s))\n s = test_tree.segment_sum(f(0, s), f(9, s))\n assert s == 10\n test_tree.clear()\n s = 0\n assert not test_tree.find(f(0, s))\n assert test_tree.insert(f(0, s))\n assert test_tree.find(f(0, s))\n assert test_tree.remove(f(0, s))\n assert not test_tree.find(f(0, s))\n test_tree.clear()\n s = 0\n assert test_tree.insert(f(491572259, s))\n assert test_tree.find(f(491572259, s))\n assert not test_tree.find(f(899375874, s))\n s = test_tree.segment_sum(f(310971296, s), f(877523306, s))\n assert s == 491572259\n assert test_tree.insert(f(352411209, s))\n",
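The main loop in the step that ends here decodes every query argument with f(x, s) = (x + s) % 1000000001, where s is the last reported segment sum. Below is a minimal brute-force sketch of that same protocol over a plain Python set, useful only as a cross-check against the AVL-backed solver; `decode` and `brute_force` are illustrative names, not part of the original code.

MOD = 1_000_000_001

def decode(x, last_sum):
    # mirrors f(x, s) = (x + s) % 1000000001 from the step above
    return (x + last_sum) % MOD

def brute_force(lines):
    # reference solver over a plain set; O(n) per 's' query
    keys, last_sum, out = set(), 0, []
    for line in lines:
        op, *args = line.split()
        args = [decode(int(a), last_sum) for a in args]
        if op == '+':
            keys.add(args[0])
        elif op == '-':
            keys.discard(args[0])
        elif op == '?':
            out.append('Found' if args[0] in keys else 'Not found')
        else:  # 's l r' -> sum of keys in [l, r]
            last_sum = sum(k for k in keys if args[0] <= k <= args[1])
            out.append(str(last_sum))
    return out

print(brute_force(['+ 1', '+ 2', 's 1 2', '? 1000000000']))  # ['3', 'Found']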
"<import token>\n\n\nclass Node:\n \"\"\"Node class for AVL tree. Implements many useful methods.\"\"\"\n\n def __init__(self, key: int):\n self.key: int = key\n self.sum: int = self.key\n self.height: int = 1\n self.size: int = 1\n self.balance_factor: int = 0\n self.parent: Union[Node, None] = None\n self.left: Union[Node, None] = None\n self.right: Union[Node, None] = None\n\n def __repr__(self):\n return (\n f'Node: key={self.key}, height={self.height}, balance_factor={self.balance_factor}, size={self.size}, sum={self.sum})'\n )\n\n def __iter__(self):\n \"\"\"In-order traversal of subtree.\"\"\"\n if self.left is not None:\n yield from self.left\n yield self\n if self.right is not None:\n yield from self.right\n\n def __getitem__(self, item: int, base: int=1) ->'Node':\n \"\"\"Access element by index.\"\"\"\n item += base ^ 1\n left_size = self.left.size if self.left is not None else 0\n if 0 < item < left_size + 1:\n return self.left.__getitem__(item)\n elif item == left_size + 1:\n return self\n elif left_size + 1 < item <= self.size:\n return self.right.__getitem__(item - left_size - 1)\n else:\n raise IndexError(f'Index out of range: {item - base ^ 1}')\n\n def __erase_links(self):\n self.parent = None\n self.left = None\n self.right = None\n\n def __update_height(self):\n left_height = self.left.height if self.left is not None else 0\n right_height = self.right.height if self.right is not None else 0\n self.height = 1 + max(left_height, right_height)\n self.balance_factor = right_height - left_height\n\n def __update_sum(self):\n left_sum = self.left.sum if self.left is not None else 0\n right_sum = self.right.sum if self.right is not None else 0\n self.sum = self.key + left_sum + right_sum\n\n def __update_size(self):\n left_size = self.left.size if self.left is not None else 0\n right_size = self.right.size if self.right is not None else 0\n self.size = 1 + left_size + right_size\n\n def __update_all(self):\n self.__update_height()\n self.__update_size()\n self.__update_sum()\n\n def __rotate_left(self) ->'Node':\n \"\"\"Perform a left AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.right\n self.right = lower.left\n if self.right is not None:\n self.right.parent = self\n self.__update_all()\n parent = self.parent\n lower.left = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __rotate_right(self) ->'Node':\n \"\"\"Perform a right AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.left\n self.left = lower.right\n if self.left is not None:\n self.left.parent = self\n self.__update_all()\n parent = self.parent\n lower.right = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __balance(self) ->'Node':\n \"\"\"Balance tree starting from current node. 
Return root node of the whole tree.\"\"\"\n current = self\n while True:\n current.__update_height()\n if current.balance_factor == 2:\n middle = current.right\n if middle.balance_factor < 0:\n middle.__rotate_right()\n current = current.__rotate_left()\n elif current.balance_factor == -2:\n middle = current.left\n if middle.balance_factor > 0:\n middle.__rotate_left()\n current = current.__rotate_right()\n else:\n current.__update_size()\n current.__update_sum()\n if current.parent is None:\n return current\n current = current.parent\n\n def add_left(self, key: int) ->'Node':\n \"\"\"Add left child, return new root. Raises an exception when such child exists.\"\"\"\n if self.left is None:\n if key >= self.key:\n raise ValueError(\"Key isn't less than parent's key\")\n self.left = Node(key=key)\n self.left.parent = self\n else:\n raise ValueError('Left child exists')\n return self.__balance()\n\n def add_right(self, key: int) ->'Node':\n \"\"\"Add right child, return new root. Raises an exception when such child exists.\"\"\"\n if self.right is None:\n if key <= self.key:\n raise ValueError(\"Key isn't bigger than parent's key\")\n self.right = Node(key=key)\n self.right.parent = self\n else:\n raise ValueError('Right child exists')\n return self.__balance()\n\n def min(self) ->'Node':\n \"\"\"Return element with minimal key in this subtree.\"\"\"\n current = self\n while current.left is not None:\n current = current.left\n return current\n\n def max(self) ->'Node':\n \"\"\"Return element with maximal key in this subtree.\"\"\"\n current = self\n while current.right is not None:\n current = current.right\n return current\n\n def predecessor(self) ->Union['Node', None]:\n \"\"\"Return element with previous key.\"\"\"\n if self.left is not None:\n return self.left.max()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.right:\n return current.parent\n else:\n current = current.parent\n return None\n\n def successor(self) ->Union['Node', None]:\n \"\"\"Return element with next key.\"\"\"\n if self.right is not None:\n return self.right.min()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.left:\n return current.parent\n else:\n current = current.parent\n return None\n\n def find(self, key) ->Union['Node', None]:\n \"\"\"Return element with such key if present, otherwise None.\"\"\"\n current = self\n while key != current.key:\n if key < current.key:\n current = current.left\n elif key > current.key:\n current = current.right\n if current is None:\n break\n return current\n\n def remove(self):\n \"\"\"Remove element from the tree, update links of adjacent nodes.\"\"\"\n parent = self.parent\n if self.left is None and self.right is None:\n if parent is None:\n return None\n elif parent.left is self:\n parent.left = None\n elif parent.right is self:\n parent.right = None\n elif self.left is not None and self.right is None:\n left = self.left\n left.parent = parent\n self.left = None\n if parent is None:\n return left\n elif parent.left is self:\n parent.left = left\n elif parent.right is self:\n parent.right = left\n elif self.left is None and self.right is not None:\n right = self.right\n right.parent = parent\n self.right = None\n if parent is None:\n return right\n elif parent.left is self:\n parent.left = right\n elif parent.right is self:\n parent.right = right\n else:\n successor = self.successor()\n self.key = successor.key\n return successor.remove()\n self.parent = None\n return parent.__balance()\n\n 
def split_sums(self, key: int) ->Tuple[int, int]:\n \"\"\"Compute sum of all keys less than this one,\n sum of all keys greater than this one and then return them.\"\"\"\n current = self\n less, greater = 0, 0\n while key != current.key:\n if key < current.key:\n greater += (current.right.sum if current.right is not None else\n 0)\n greater += current.key\n current = current.left\n elif key > current.key:\n less += current.left.sum if current.left is not None else 0\n less += current.key\n current = current.right\n if current is None:\n break\n if current is not None:\n less += current.left.sum if current.left is not None else 0\n greater += current.right.sum if current.right is not None else 0\n return less, greater\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) ->int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n\n @property\n def size(self) ->int:\n \"\"\"Return the number of nodes in the tree.\"\"\"\n return self.root.size if not self.empty() else 0\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n\n @property\n def height(self) ->int:\n \"\"\"Return the height of the tree.\"\"\"\n return self.root.height if not self.empty() else 0\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n\n def remove(self, key: int) ->bool:\n \"\"\"Remove element with such key if it exists in the tree (return True),\n or return False otherwise.\"\"\"\n current = self.root.find(key) if not self.empty() else None\n if current is None:\n return False\n self.root = current.remove()\n return True\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\ndef f(x: int, s: int):\n return (x + s) % 1000000001\n\n\n<code token>\n\n\n@given(st.lists(st.integers(min_value=-10000, max_value=10000), max_size=\n 10000, unique=True))\n@settings(max_examples=250)\ndef test_bst_properties(seq):\n test_tree = AVLTree()\n for x in seq:\n test_tree.insert(x)\n assert list(test_tree) == sorted(seq)\n assert 
test_tree.size == len(seq)\n assert test_tree.height <= 1.44 * log2(1 + len(seq))\n if len(seq) >= 10:\n assert test_tree.find(seq[5])\n assert test_tree.find(seq[3])\n assert test_tree.find(25) == (25 in seq)\n assert test_tree.remove(seq.pop())\n assert list(test_tree) == sorted(seq)\n assert test_tree.remove(seq.pop())\n assert list(test_tree) == sorted(seq)\n assert test_tree.size == len(seq)\n assert test_tree.find(seq[5])\n assert test_tree.remove(seq.pop(len(seq) // 2))\n assert list(test_tree) == sorted(seq)\n assert test_tree[2] == sorted(seq)[1]\n assert test_tree[test_tree.size] == sorted(seq)[len(seq) - 1]\n\n\ndef test_operations():\n test_tree = AVLTree()\n s = 0\n assert not test_tree.find(f(1, s))\n assert test_tree.insert(f(1, s))\n assert test_tree.find(f(1, s))\n assert test_tree.insert(f(2, s))\n s = test_tree.segment_sum(f(1, s), f(2, s))\n assert s == 3\n assert not test_tree.insert(f(1000000000, s))\n assert test_tree.find(f(1000000000, s))\n assert test_tree.remove(f(1000000000, s))\n assert not test_tree.remove(f(1000000000, s))\n s = test_tree.segment_sum(f(999999999, s), f(1000000000, s))\n assert s == 1\n assert not test_tree.remove(f(2, s))\n assert not test_tree.find(f(2, s))\n assert test_tree.remove(f(0, s))\n assert test_tree.insert(f(9, s))\n s = test_tree.segment_sum(f(0, s), f(9, s))\n assert s == 10\n test_tree.clear()\n s = 0\n assert not test_tree.find(f(0, s))\n assert test_tree.insert(f(0, s))\n assert test_tree.find(f(0, s))\n assert test_tree.remove(f(0, s))\n assert not test_tree.find(f(0, s))\n test_tree.clear()\n s = 0\n assert test_tree.insert(f(491572259, s))\n assert test_tree.find(f(491572259, s))\n assert not test_tree.find(f(899375874, s))\n s = test_tree.segment_sum(f(310971296, s), f(877523306, s))\n assert s == 491572259\n assert test_tree.insert(f(352411209, s))\n",
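The `__balance` walk in the steps above climbs from the modified node and applies one of the four classic AVL rotation cases (right-right, right-left, left-left, left-right), using the sign convention balance_factor = right height − left height. The following is a compact recursive sketch of the same rebalancing rule, independent of the `Node`/`AVLTree` classes in this row; `N`, `h`, `fix`, `rot_left`, `rot_right` and `insert` are hypothetical helper names introduced only for illustration.

class N:
    def __init__(self, key):
        self.key, self.left, self.right, self.height = key, None, None, 1

def h(n):
    return n.height if n else 0

def fix(n):
    n.height = 1 + max(h(n.left), h(n.right))

def rot_left(n):
    r = n.right
    n.right, r.left = r.left, n
    fix(n)
    fix(r)
    return r

def rot_right(n):
    l = n.left
    n.left, l.right = l.right, n
    fix(n)
    fix(l)
    return l

def insert(n, key):
    if n is None:
        return N(key)
    if key < n.key:
        n.left = insert(n.left, key)
    elif key > n.key:
        n.right = insert(n.right, key)
    else:
        return n                        # duplicate key: set semantics
    fix(n)
    bf = h(n.right) - h(n.left)         # same sign convention as balance_factor above
    if bf == 2:                         # right-heavy
        if h(n.right.left) > h(n.right.right):
            n.right = rot_right(n.right)    # right-left case
        return rot_left(n)
    if bf == -2:                        # left-heavy
        if h(n.left.right) > h(n.left.left):
            n.left = rot_left(n.left)       # left-right case
        return rot_right(n)
    return n

def inorder(n):
    return inorder(n.left) + [n.key] + inorder(n.right) if n else []

root = None
for k in [3, 1, 4, 1, 5, 9, 2, 6]:
    root = insert(root, k)
assert inorder(root) == [1, 2, 3, 4, 5, 6, 9]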
"<import token>\n\n\nclass Node:\n \"\"\"Node class for AVL tree. Implements many useful methods.\"\"\"\n\n def __init__(self, key: int):\n self.key: int = key\n self.sum: int = self.key\n self.height: int = 1\n self.size: int = 1\n self.balance_factor: int = 0\n self.parent: Union[Node, None] = None\n self.left: Union[Node, None] = None\n self.right: Union[Node, None] = None\n\n def __repr__(self):\n return (\n f'Node: key={self.key}, height={self.height}, balance_factor={self.balance_factor}, size={self.size}, sum={self.sum})'\n )\n\n def __iter__(self):\n \"\"\"In-order traversal of subtree.\"\"\"\n if self.left is not None:\n yield from self.left\n yield self\n if self.right is not None:\n yield from self.right\n\n def __getitem__(self, item: int, base: int=1) ->'Node':\n \"\"\"Access element by index.\"\"\"\n item += base ^ 1\n left_size = self.left.size if self.left is not None else 0\n if 0 < item < left_size + 1:\n return self.left.__getitem__(item)\n elif item == left_size + 1:\n return self\n elif left_size + 1 < item <= self.size:\n return self.right.__getitem__(item - left_size - 1)\n else:\n raise IndexError(f'Index out of range: {item - base ^ 1}')\n\n def __erase_links(self):\n self.parent = None\n self.left = None\n self.right = None\n\n def __update_height(self):\n left_height = self.left.height if self.left is not None else 0\n right_height = self.right.height if self.right is not None else 0\n self.height = 1 + max(left_height, right_height)\n self.balance_factor = right_height - left_height\n\n def __update_sum(self):\n left_sum = self.left.sum if self.left is not None else 0\n right_sum = self.right.sum if self.right is not None else 0\n self.sum = self.key + left_sum + right_sum\n\n def __update_size(self):\n left_size = self.left.size if self.left is not None else 0\n right_size = self.right.size if self.right is not None else 0\n self.size = 1 + left_size + right_size\n\n def __update_all(self):\n self.__update_height()\n self.__update_size()\n self.__update_sum()\n\n def __rotate_left(self) ->'Node':\n \"\"\"Perform a left AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.right\n self.right = lower.left\n if self.right is not None:\n self.right.parent = self\n self.__update_all()\n parent = self.parent\n lower.left = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __rotate_right(self) ->'Node':\n \"\"\"Perform a right AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.left\n self.left = lower.right\n if self.left is not None:\n self.left.parent = self\n self.__update_all()\n parent = self.parent\n lower.right = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __balance(self) ->'Node':\n \"\"\"Balance tree starting from current node. 
Return root node of the whole tree.\"\"\"\n current = self\n while True:\n current.__update_height()\n if current.balance_factor == 2:\n middle = current.right\n if middle.balance_factor < 0:\n middle.__rotate_right()\n current = current.__rotate_left()\n elif current.balance_factor == -2:\n middle = current.left\n if middle.balance_factor > 0:\n middle.__rotate_left()\n current = current.__rotate_right()\n else:\n current.__update_size()\n current.__update_sum()\n if current.parent is None:\n return current\n current = current.parent\n\n def add_left(self, key: int) ->'Node':\n \"\"\"Add left child, return new root. Raises an exception when such child exists.\"\"\"\n if self.left is None:\n if key >= self.key:\n raise ValueError(\"Key isn't less than parent's key\")\n self.left = Node(key=key)\n self.left.parent = self\n else:\n raise ValueError('Left child exists')\n return self.__balance()\n\n def add_right(self, key: int) ->'Node':\n \"\"\"Add right child, return new root. Raises an exception when such child exists.\"\"\"\n if self.right is None:\n if key <= self.key:\n raise ValueError(\"Key isn't bigger than parent's key\")\n self.right = Node(key=key)\n self.right.parent = self\n else:\n raise ValueError('Right child exists')\n return self.__balance()\n\n def min(self) ->'Node':\n \"\"\"Return element with minimal key in this subtree.\"\"\"\n current = self\n while current.left is not None:\n current = current.left\n return current\n\n def max(self) ->'Node':\n \"\"\"Return element with maximal key in this subtree.\"\"\"\n current = self\n while current.right is not None:\n current = current.right\n return current\n\n def predecessor(self) ->Union['Node', None]:\n \"\"\"Return element with previous key.\"\"\"\n if self.left is not None:\n return self.left.max()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.right:\n return current.parent\n else:\n current = current.parent\n return None\n\n def successor(self) ->Union['Node', None]:\n \"\"\"Return element with next key.\"\"\"\n if self.right is not None:\n return self.right.min()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.left:\n return current.parent\n else:\n current = current.parent\n return None\n\n def find(self, key) ->Union['Node', None]:\n \"\"\"Return element with such key if present, otherwise None.\"\"\"\n current = self\n while key != current.key:\n if key < current.key:\n current = current.left\n elif key > current.key:\n current = current.right\n if current is None:\n break\n return current\n\n def remove(self):\n \"\"\"Remove element from the tree, update links of adjacent nodes.\"\"\"\n parent = self.parent\n if self.left is None and self.right is None:\n if parent is None:\n return None\n elif parent.left is self:\n parent.left = None\n elif parent.right is self:\n parent.right = None\n elif self.left is not None and self.right is None:\n left = self.left\n left.parent = parent\n self.left = None\n if parent is None:\n return left\n elif parent.left is self:\n parent.left = left\n elif parent.right is self:\n parent.right = left\n elif self.left is None and self.right is not None:\n right = self.right\n right.parent = parent\n self.right = None\n if parent is None:\n return right\n elif parent.left is self:\n parent.left = right\n elif parent.right is self:\n parent.right = right\n else:\n successor = self.successor()\n self.key = successor.key\n return successor.remove()\n self.parent = None\n return parent.__balance()\n\n 
def split_sums(self, key: int) ->Tuple[int, int]:\n \"\"\"Compute sum of all keys less than this one,\n sum of all keys greater than this one and then return them.\"\"\"\n current = self\n less, greater = 0, 0\n while key != current.key:\n if key < current.key:\n greater += (current.right.sum if current.right is not None else\n 0)\n greater += current.key\n current = current.left\n elif key > current.key:\n less += current.left.sum if current.left is not None else 0\n less += current.key\n current = current.right\n if current is None:\n break\n if current is not None:\n less += current.left.sum if current.left is not None else 0\n greater += current.right.sum if current.right is not None else 0\n return less, greater\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) ->int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n\n @property\n def size(self) ->int:\n \"\"\"Return the number of nodes in the tree.\"\"\"\n return self.root.size if not self.empty() else 0\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n\n @property\n def height(self) ->int:\n \"\"\"Return the height of the tree.\"\"\"\n return self.root.height if not self.empty() else 0\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n\n def remove(self, key: int) ->bool:\n \"\"\"Remove element with such key if it exists in the tree (return True),\n or return False otherwise.\"\"\"\n current = self.root.find(key) if not self.empty() else None\n if current is None:\n return False\n self.root = current.remove()\n return True\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\ndef f(x: int, s: int):\n return (x + s) % 1000000001\n\n\n<code token>\n<function token>\n\n\ndef test_operations():\n test_tree = AVLTree()\n s = 0\n assert not test_tree.find(f(1, s))\n assert test_tree.insert(f(1, s))\n assert test_tree.find(f(1, s))\n assert test_tree.insert(f(2, s))\n s = test_tree.segment_sum(f(1, s), f(2, s))\n assert s == 
3\n assert not test_tree.insert(f(1000000000, s))\n assert test_tree.find(f(1000000000, s))\n assert test_tree.remove(f(1000000000, s))\n assert not test_tree.remove(f(1000000000, s))\n s = test_tree.segment_sum(f(999999999, s), f(1000000000, s))\n assert s == 1\n assert not test_tree.remove(f(2, s))\n assert not test_tree.find(f(2, s))\n assert test_tree.remove(f(0, s))\n assert test_tree.insert(f(9, s))\n s = test_tree.segment_sum(f(0, s), f(9, s))\n assert s == 10\n test_tree.clear()\n s = 0\n assert not test_tree.find(f(0, s))\n assert test_tree.insert(f(0, s))\n assert test_tree.find(f(0, s))\n assert test_tree.remove(f(0, s))\n assert not test_tree.find(f(0, s))\n test_tree.clear()\n s = 0\n assert test_tree.insert(f(491572259, s))\n assert test_tree.find(f(491572259, s))\n assert not test_tree.find(f(899375874, s))\n s = test_tree.segment_sum(f(310971296, s), f(877523306, s))\n assert s == 491572259\n assert test_tree.insert(f(352411209, s))\n",
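The `size` field maintained by the code above turns the tree into an order-statistic structure: `Node.__getitem__` finds the k-th smallest key in O(log n) by comparing the index with the left subtree's size. A self-contained sketch of that descent over a statically built balanced BST follows; `OSNode`, `build` and `kth` are hypothetical names, not taken from the original source.

class OSNode:
    def __init__(self, key):
        self.key, self.left, self.right, self.size = key, None, None, 1

def build(sorted_keys):
    # balanced BST with size fields, built from an already-sorted list
    if not sorted_keys:
        return None
    mid = len(sorted_keys) // 2
    node = OSNode(sorted_keys[mid])
    node.left = build(sorted_keys[:mid])
    node.right = build(sorted_keys[mid + 1:])
    node.size += (node.left.size if node.left else 0) + \
                 (node.right.size if node.right else 0)
    return node

def kth(node, k):
    # 1-based k-th smallest key; descend by comparing k with the left size
    left_size = node.left.size if node.left else 0
    if k <= left_size:
        return kth(node.left, k)
    if k == left_size + 1:
        return node.key
    return kth(node.right, k - left_size - 1)

keys = [2, 3, 5, 7, 11, 13, 17]
root = build(keys)
assert [kth(root, i) for i in range(1, len(keys) + 1)] == keys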
"<import token>\n\n\nclass Node:\n \"\"\"Node class for AVL tree. Implements many useful methods.\"\"\"\n\n def __init__(self, key: int):\n self.key: int = key\n self.sum: int = self.key\n self.height: int = 1\n self.size: int = 1\n self.balance_factor: int = 0\n self.parent: Union[Node, None] = None\n self.left: Union[Node, None] = None\n self.right: Union[Node, None] = None\n\n def __repr__(self):\n return (\n f'Node: key={self.key}, height={self.height}, balance_factor={self.balance_factor}, size={self.size}, sum={self.sum})'\n )\n\n def __iter__(self):\n \"\"\"In-order traversal of subtree.\"\"\"\n if self.left is not None:\n yield from self.left\n yield self\n if self.right is not None:\n yield from self.right\n\n def __getitem__(self, item: int, base: int=1) ->'Node':\n \"\"\"Access element by index.\"\"\"\n item += base ^ 1\n left_size = self.left.size if self.left is not None else 0\n if 0 < item < left_size + 1:\n return self.left.__getitem__(item)\n elif item == left_size + 1:\n return self\n elif left_size + 1 < item <= self.size:\n return self.right.__getitem__(item - left_size - 1)\n else:\n raise IndexError(f'Index out of range: {item - base ^ 1}')\n\n def __erase_links(self):\n self.parent = None\n self.left = None\n self.right = None\n\n def __update_height(self):\n left_height = self.left.height if self.left is not None else 0\n right_height = self.right.height if self.right is not None else 0\n self.height = 1 + max(left_height, right_height)\n self.balance_factor = right_height - left_height\n\n def __update_sum(self):\n left_sum = self.left.sum if self.left is not None else 0\n right_sum = self.right.sum if self.right is not None else 0\n self.sum = self.key + left_sum + right_sum\n\n def __update_size(self):\n left_size = self.left.size if self.left is not None else 0\n right_size = self.right.size if self.right is not None else 0\n self.size = 1 + left_size + right_size\n\n def __update_all(self):\n self.__update_height()\n self.__update_size()\n self.__update_sum()\n\n def __rotate_left(self) ->'Node':\n \"\"\"Perform a left AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.right\n self.right = lower.left\n if self.right is not None:\n self.right.parent = self\n self.__update_all()\n parent = self.parent\n lower.left = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __rotate_right(self) ->'Node':\n \"\"\"Perform a right AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.left\n self.left = lower.right\n if self.left is not None:\n self.left.parent = self\n self.__update_all()\n parent = self.parent\n lower.right = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __balance(self) ->'Node':\n \"\"\"Balance tree starting from current node. 
Return root node of the whole tree.\"\"\"\n current = self\n while True:\n current.__update_height()\n if current.balance_factor == 2:\n middle = current.right\n if middle.balance_factor < 0:\n middle.__rotate_right()\n current = current.__rotate_left()\n elif current.balance_factor == -2:\n middle = current.left\n if middle.balance_factor > 0:\n middle.__rotate_left()\n current = current.__rotate_right()\n else:\n current.__update_size()\n current.__update_sum()\n if current.parent is None:\n return current\n current = current.parent\n\n def add_left(self, key: int) ->'Node':\n \"\"\"Add left child, return new root. Raises an exception when such child exists.\"\"\"\n if self.left is None:\n if key >= self.key:\n raise ValueError(\"Key isn't less than parent's key\")\n self.left = Node(key=key)\n self.left.parent = self\n else:\n raise ValueError('Left child exists')\n return self.__balance()\n\n def add_right(self, key: int) ->'Node':\n \"\"\"Add right child, return new root. Raises an exception when such child exists.\"\"\"\n if self.right is None:\n if key <= self.key:\n raise ValueError(\"Key isn't bigger than parent's key\")\n self.right = Node(key=key)\n self.right.parent = self\n else:\n raise ValueError('Right child exists')\n return self.__balance()\n\n def min(self) ->'Node':\n \"\"\"Return element with minimal key in this subtree.\"\"\"\n current = self\n while current.left is not None:\n current = current.left\n return current\n\n def max(self) ->'Node':\n \"\"\"Return element with maximal key in this subtree.\"\"\"\n current = self\n while current.right is not None:\n current = current.right\n return current\n\n def predecessor(self) ->Union['Node', None]:\n \"\"\"Return element with previous key.\"\"\"\n if self.left is not None:\n return self.left.max()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.right:\n return current.parent\n else:\n current = current.parent\n return None\n\n def successor(self) ->Union['Node', None]:\n \"\"\"Return element with next key.\"\"\"\n if self.right is not None:\n return self.right.min()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.left:\n return current.parent\n else:\n current = current.parent\n return None\n\n def find(self, key) ->Union['Node', None]:\n \"\"\"Return element with such key if present, otherwise None.\"\"\"\n current = self\n while key != current.key:\n if key < current.key:\n current = current.left\n elif key > current.key:\n current = current.right\n if current is None:\n break\n return current\n\n def remove(self):\n \"\"\"Remove element from the tree, update links of adjacent nodes.\"\"\"\n parent = self.parent\n if self.left is None and self.right is None:\n if parent is None:\n return None\n elif parent.left is self:\n parent.left = None\n elif parent.right is self:\n parent.right = None\n elif self.left is not None and self.right is None:\n left = self.left\n left.parent = parent\n self.left = None\n if parent is None:\n return left\n elif parent.left is self:\n parent.left = left\n elif parent.right is self:\n parent.right = left\n elif self.left is None and self.right is not None:\n right = self.right\n right.parent = parent\n self.right = None\n if parent is None:\n return right\n elif parent.left is self:\n parent.left = right\n elif parent.right is self:\n parent.right = right\n else:\n successor = self.successor()\n self.key = successor.key\n return successor.remove()\n self.parent = None\n return parent.__balance()\n\n 
def split_sums(self, key: int) ->Tuple[int, int]:\n \"\"\"Compute sum of all keys less than this one,\n sum of all keys greater than this one and then return them.\"\"\"\n current = self\n less, greater = 0, 0\n while key != current.key:\n if key < current.key:\n greater += (current.right.sum if current.right is not None else\n 0)\n greater += current.key\n current = current.left\n elif key > current.key:\n less += current.left.sum if current.left is not None else 0\n less += current.key\n current = current.right\n if current is None:\n break\n if current is not None:\n less += current.left.sum if current.left is not None else 0\n greater += current.right.sum if current.right is not None else 0\n return less, greater\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) ->int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n\n @property\n def size(self) ->int:\n \"\"\"Return the number of nodes in the tree.\"\"\"\n return self.root.size if not self.empty() else 0\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n\n @property\n def height(self) ->int:\n \"\"\"Return the height of the tree.\"\"\"\n return self.root.height if not self.empty() else 0\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n\n def remove(self, key: int) ->bool:\n \"\"\"Remove element with such key if it exists in the tree (return True),\n or return False otherwise.\"\"\"\n current = self.root.find(key) if not self.empty() else None\n if current is None:\n return False\n self.root = current.remove()\n return True\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n\n\ndef test_operations():\n test_tree = AVLTree()\n s = 0\n assert not test_tree.find(f(1, s))\n assert test_tree.insert(f(1, s))\n assert test_tree.find(f(1, s))\n assert test_tree.insert(f(2, s))\n s = test_tree.segment_sum(f(1, s), f(2, s))\n assert s == 3\n assert not 
test_tree.insert(f(1000000000, s))\n assert test_tree.find(f(1000000000, s))\n assert test_tree.remove(f(1000000000, s))\n assert not test_tree.remove(f(1000000000, s))\n s = test_tree.segment_sum(f(999999999, s), f(1000000000, s))\n assert s == 1\n assert not test_tree.remove(f(2, s))\n assert not test_tree.find(f(2, s))\n assert test_tree.remove(f(0, s))\n assert test_tree.insert(f(9, s))\n s = test_tree.segment_sum(f(0, s), f(9, s))\n assert s == 10\n test_tree.clear()\n s = 0\n assert not test_tree.find(f(0, s))\n assert test_tree.insert(f(0, s))\n assert test_tree.find(f(0, s))\n assert test_tree.remove(f(0, s))\n assert not test_tree.find(f(0, s))\n test_tree.clear()\n s = 0\n assert test_tree.insert(f(491572259, s))\n assert test_tree.find(f(491572259, s))\n assert not test_tree.find(f(899375874, s))\n s = test_tree.segment_sum(f(310971296, s), f(877523306, s))\n assert s == 491572259\n assert test_tree.insert(f(352411209, s))\n",
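`AVLTree.segment_sum` in the steps above relies on the identity sum([l, r]) = total − (sum of keys < l) − (sum of keys > r), obtained from two `split_sums` calls. The tiny brute-force check below verifies that identity on a plain list; the function names mirror the methods above but are standalone illustrations, not the dataset's own code.

def split_sums(keys, pivot):
    # brute-force counterpart of Node.split_sums: (sum below, sum above)
    less = sum(k for k in keys if k < pivot)
    greater = sum(k for k in keys if k > pivot)
    return less, greater

def segment_sum(keys, lo, hi):
    # total - (keys below lo) - (keys above hi) == sum of keys in [lo, hi]
    less, _ = split_sums(keys, lo)
    _, greater = split_sums(keys, hi)
    return sum(keys) - less - greater

keys = [1, 4, 6, 9, 12, 20]
assert segment_sum(keys, 4, 12) == 4 + 6 + 9 + 12
assert segment_sum(keys, 5, 11) == 6 + 9
assert segment_sum(keys, 0, 100) == sum(keys)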
"<import token>\n\n\nclass Node:\n \"\"\"Node class for AVL tree. Implements many useful methods.\"\"\"\n\n def __init__(self, key: int):\n self.key: int = key\n self.sum: int = self.key\n self.height: int = 1\n self.size: int = 1\n self.balance_factor: int = 0\n self.parent: Union[Node, None] = None\n self.left: Union[Node, None] = None\n self.right: Union[Node, None] = None\n\n def __repr__(self):\n return (\n f'Node: key={self.key}, height={self.height}, balance_factor={self.balance_factor}, size={self.size}, sum={self.sum})'\n )\n\n def __iter__(self):\n \"\"\"In-order traversal of subtree.\"\"\"\n if self.left is not None:\n yield from self.left\n yield self\n if self.right is not None:\n yield from self.right\n\n def __getitem__(self, item: int, base: int=1) ->'Node':\n \"\"\"Access element by index.\"\"\"\n item += base ^ 1\n left_size = self.left.size if self.left is not None else 0\n if 0 < item < left_size + 1:\n return self.left.__getitem__(item)\n elif item == left_size + 1:\n return self\n elif left_size + 1 < item <= self.size:\n return self.right.__getitem__(item - left_size - 1)\n else:\n raise IndexError(f'Index out of range: {item - base ^ 1}')\n\n def __erase_links(self):\n self.parent = None\n self.left = None\n self.right = None\n\n def __update_height(self):\n left_height = self.left.height if self.left is not None else 0\n right_height = self.right.height if self.right is not None else 0\n self.height = 1 + max(left_height, right_height)\n self.balance_factor = right_height - left_height\n\n def __update_sum(self):\n left_sum = self.left.sum if self.left is not None else 0\n right_sum = self.right.sum if self.right is not None else 0\n self.sum = self.key + left_sum + right_sum\n\n def __update_size(self):\n left_size = self.left.size if self.left is not None else 0\n right_size = self.right.size if self.right is not None else 0\n self.size = 1 + left_size + right_size\n\n def __update_all(self):\n self.__update_height()\n self.__update_size()\n self.__update_sum()\n\n def __rotate_left(self) ->'Node':\n \"\"\"Perform a left AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.right\n self.right = lower.left\n if self.right is not None:\n self.right.parent = self\n self.__update_all()\n parent = self.parent\n lower.left = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __rotate_right(self) ->'Node':\n \"\"\"Perform a right AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.left\n self.left = lower.right\n if self.left is not None:\n self.left.parent = self\n self.__update_all()\n parent = self.parent\n lower.right = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __balance(self) ->'Node':\n \"\"\"Balance tree starting from current node. 
Return root node of the whole tree.\"\"\"\n current = self\n while True:\n current.__update_height()\n if current.balance_factor == 2:\n middle = current.right\n if middle.balance_factor < 0:\n middle.__rotate_right()\n current = current.__rotate_left()\n elif current.balance_factor == -2:\n middle = current.left\n if middle.balance_factor > 0:\n middle.__rotate_left()\n current = current.__rotate_right()\n else:\n current.__update_size()\n current.__update_sum()\n if current.parent is None:\n return current\n current = current.parent\n\n def add_left(self, key: int) ->'Node':\n \"\"\"Add left child, return new root. Raises an exception when such child exists.\"\"\"\n if self.left is None:\n if key >= self.key:\n raise ValueError(\"Key isn't less than parent's key\")\n self.left = Node(key=key)\n self.left.parent = self\n else:\n raise ValueError('Left child exists')\n return self.__balance()\n\n def add_right(self, key: int) ->'Node':\n \"\"\"Add right child, return new root. Raises an exception when such child exists.\"\"\"\n if self.right is None:\n if key <= self.key:\n raise ValueError(\"Key isn't bigger than parent's key\")\n self.right = Node(key=key)\n self.right.parent = self\n else:\n raise ValueError('Right child exists')\n return self.__balance()\n\n def min(self) ->'Node':\n \"\"\"Return element with minimal key in this subtree.\"\"\"\n current = self\n while current.left is not None:\n current = current.left\n return current\n\n def max(self) ->'Node':\n \"\"\"Return element with maximal key in this subtree.\"\"\"\n current = self\n while current.right is not None:\n current = current.right\n return current\n\n def predecessor(self) ->Union['Node', None]:\n \"\"\"Return element with previous key.\"\"\"\n if self.left is not None:\n return self.left.max()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.right:\n return current.parent\n else:\n current = current.parent\n return None\n\n def successor(self) ->Union['Node', None]:\n \"\"\"Return element with next key.\"\"\"\n if self.right is not None:\n return self.right.min()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.left:\n return current.parent\n else:\n current = current.parent\n return None\n\n def find(self, key) ->Union['Node', None]:\n \"\"\"Return element with such key if present, otherwise None.\"\"\"\n current = self\n while key != current.key:\n if key < current.key:\n current = current.left\n elif key > current.key:\n current = current.right\n if current is None:\n break\n return current\n\n def remove(self):\n \"\"\"Remove element from the tree, update links of adjacent nodes.\"\"\"\n parent = self.parent\n if self.left is None and self.right is None:\n if parent is None:\n return None\n elif parent.left is self:\n parent.left = None\n elif parent.right is self:\n parent.right = None\n elif self.left is not None and self.right is None:\n left = self.left\n left.parent = parent\n self.left = None\n if parent is None:\n return left\n elif parent.left is self:\n parent.left = left\n elif parent.right is self:\n parent.right = left\n elif self.left is None and self.right is not None:\n right = self.right\n right.parent = parent\n self.right = None\n if parent is None:\n return right\n elif parent.left is self:\n parent.left = right\n elif parent.right is self:\n parent.right = right\n else:\n successor = self.successor()\n self.key = successor.key\n return successor.remove()\n self.parent = None\n return parent.__balance()\n\n 
def split_sums(self, key: int) ->Tuple[int, int]:\n \"\"\"Compute sum of all keys less than this one,\n sum of all keys greater than this one and then return them.\"\"\"\n current = self\n less, greater = 0, 0\n while key != current.key:\n if key < current.key:\n greater += (current.right.sum if current.right is not None else\n 0)\n greater += current.key\n current = current.left\n elif key > current.key:\n less += current.left.sum if current.left is not None else 0\n less += current.key\n current = current.right\n if current is None:\n break\n if current is not None:\n less += current.left.sum if current.left is not None else 0\n greater += current.right.sum if current.right is not None else 0\n return less, greater\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) ->int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n\n @property\n def size(self) ->int:\n \"\"\"Return the number of nodes in the tree.\"\"\"\n return self.root.size if not self.empty() else 0\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n\n @property\n def height(self) ->int:\n \"\"\"Return the height of the tree.\"\"\"\n return self.root.height if not self.empty() else 0\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n\n def remove(self, key: int) ->bool:\n \"\"\"Remove element with such key if it exists in the tree (return True),\n or return False otherwise.\"\"\"\n current = self.root.find(key) if not self.empty() else None\n if current is None:\n return False\n self.root = current.remove()\n return True\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
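The `__iter__` methods above perform an in-order walk with `yield from`, which is what makes `list(tree)` come out sorted in the property test. A minimal standalone generator over a (left, key, right) tuple tree shows the same pattern; the tuple representation is an assumption made only for this sketch.

def inorder(node):
    # in-order traversal of a (left, key, right) tuple tree, as a generator
    if node is None:
        return
    left, key, right = node
    yield from inorder(left)    # smaller keys first
    yield key
    yield from inorder(right)   # then larger keys

tree = ((None, 1, None), 2, ((None, 3, None), 4, (None, 5, None)))
assert list(inorder(tree)) == [1, 2, 3, 4, 5]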
"<import token>\n\n\nclass Node:\n <docstring token>\n\n def __init__(self, key: int):\n self.key: int = key\n self.sum: int = self.key\n self.height: int = 1\n self.size: int = 1\n self.balance_factor: int = 0\n self.parent: Union[Node, None] = None\n self.left: Union[Node, None] = None\n self.right: Union[Node, None] = None\n\n def __repr__(self):\n return (\n f'Node: key={self.key}, height={self.height}, balance_factor={self.balance_factor}, size={self.size}, sum={self.sum})'\n )\n\n def __iter__(self):\n \"\"\"In-order traversal of subtree.\"\"\"\n if self.left is not None:\n yield from self.left\n yield self\n if self.right is not None:\n yield from self.right\n\n def __getitem__(self, item: int, base: int=1) ->'Node':\n \"\"\"Access element by index.\"\"\"\n item += base ^ 1\n left_size = self.left.size if self.left is not None else 0\n if 0 < item < left_size + 1:\n return self.left.__getitem__(item)\n elif item == left_size + 1:\n return self\n elif left_size + 1 < item <= self.size:\n return self.right.__getitem__(item - left_size - 1)\n else:\n raise IndexError(f'Index out of range: {item - base ^ 1}')\n\n def __erase_links(self):\n self.parent = None\n self.left = None\n self.right = None\n\n def __update_height(self):\n left_height = self.left.height if self.left is not None else 0\n right_height = self.right.height if self.right is not None else 0\n self.height = 1 + max(left_height, right_height)\n self.balance_factor = right_height - left_height\n\n def __update_sum(self):\n left_sum = self.left.sum if self.left is not None else 0\n right_sum = self.right.sum if self.right is not None else 0\n self.sum = self.key + left_sum + right_sum\n\n def __update_size(self):\n left_size = self.left.size if self.left is not None else 0\n right_size = self.right.size if self.right is not None else 0\n self.size = 1 + left_size + right_size\n\n def __update_all(self):\n self.__update_height()\n self.__update_size()\n self.__update_sum()\n\n def __rotate_left(self) ->'Node':\n \"\"\"Perform a left AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.right\n self.right = lower.left\n if self.right is not None:\n self.right.parent = self\n self.__update_all()\n parent = self.parent\n lower.left = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __rotate_right(self) ->'Node':\n \"\"\"Perform a right AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.left\n self.left = lower.right\n if self.left is not None:\n self.left.parent = self\n self.__update_all()\n parent = self.parent\n lower.right = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __balance(self) ->'Node':\n \"\"\"Balance tree starting from current node. 
Return root node of the whole tree.\"\"\"\n current = self\n while True:\n current.__update_height()\n if current.balance_factor == 2:\n middle = current.right\n if middle.balance_factor < 0:\n middle.__rotate_right()\n current = current.__rotate_left()\n elif current.balance_factor == -2:\n middle = current.left\n if middle.balance_factor > 0:\n middle.__rotate_left()\n current = current.__rotate_right()\n else:\n current.__update_size()\n current.__update_sum()\n if current.parent is None:\n return current\n current = current.parent\n\n def add_left(self, key: int) ->'Node':\n \"\"\"Add left child, return new root. Raises an exception when such child exists.\"\"\"\n if self.left is None:\n if key >= self.key:\n raise ValueError(\"Key isn't less than parent's key\")\n self.left = Node(key=key)\n self.left.parent = self\n else:\n raise ValueError('Left child exists')\n return self.__balance()\n\n def add_right(self, key: int) ->'Node':\n \"\"\"Add right child, return new root. Raises an exception when such child exists.\"\"\"\n if self.right is None:\n if key <= self.key:\n raise ValueError(\"Key isn't bigger than parent's key\")\n self.right = Node(key=key)\n self.right.parent = self\n else:\n raise ValueError('Right child exists')\n return self.__balance()\n\n def min(self) ->'Node':\n \"\"\"Return element with minimal key in this subtree.\"\"\"\n current = self\n while current.left is not None:\n current = current.left\n return current\n\n def max(self) ->'Node':\n \"\"\"Return element with maximal key in this subtree.\"\"\"\n current = self\n while current.right is not None:\n current = current.right\n return current\n\n def predecessor(self) ->Union['Node', None]:\n \"\"\"Return element with previous key.\"\"\"\n if self.left is not None:\n return self.left.max()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.right:\n return current.parent\n else:\n current = current.parent\n return None\n\n def successor(self) ->Union['Node', None]:\n \"\"\"Return element with next key.\"\"\"\n if self.right is not None:\n return self.right.min()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.left:\n return current.parent\n else:\n current = current.parent\n return None\n\n def find(self, key) ->Union['Node', None]:\n \"\"\"Return element with such key if present, otherwise None.\"\"\"\n current = self\n while key != current.key:\n if key < current.key:\n current = current.left\n elif key > current.key:\n current = current.right\n if current is None:\n break\n return current\n\n def remove(self):\n \"\"\"Remove element from the tree, update links of adjacent nodes.\"\"\"\n parent = self.parent\n if self.left is None and self.right is None:\n if parent is None:\n return None\n elif parent.left is self:\n parent.left = None\n elif parent.right is self:\n parent.right = None\n elif self.left is not None and self.right is None:\n left = self.left\n left.parent = parent\n self.left = None\n if parent is None:\n return left\n elif parent.left is self:\n parent.left = left\n elif parent.right is self:\n parent.right = left\n elif self.left is None and self.right is not None:\n right = self.right\n right.parent = parent\n self.right = None\n if parent is None:\n return right\n elif parent.left is self:\n parent.left = right\n elif parent.right is self:\n parent.right = right\n else:\n successor = self.successor()\n self.key = successor.key\n return successor.remove()\n self.parent = None\n return parent.__balance()\n\n 
def split_sums(self, key: int) ->Tuple[int, int]:\n \"\"\"Compute sum of all keys less than this one,\n sum of all keys greater than this one and then return them.\"\"\"\n current = self\n less, greater = 0, 0\n while key != current.key:\n if key < current.key:\n greater += (current.right.sum if current.right is not None else\n 0)\n greater += current.key\n current = current.left\n elif key > current.key:\n less += current.left.sum if current.left is not None else 0\n less += current.key\n current = current.right\n if current is None:\n break\n if current is not None:\n less += current.left.sum if current.left is not None else 0\n greater += current.right.sum if current.right is not None else 0\n return less, greater\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) ->int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n\n @property\n def size(self) ->int:\n \"\"\"Return the number of nodes in the tree.\"\"\"\n return self.root.size if not self.empty() else 0\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n\n @property\n def height(self) ->int:\n \"\"\"Return the height of the tree.\"\"\"\n return self.root.height if not self.empty() else 0\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n\n def remove(self, key: int) ->bool:\n \"\"\"Remove element with such key if it exists in the tree (return True),\n or return False otherwise.\"\"\"\n current = self.root.find(key) if not self.empty() else None\n if current is None:\n return False\n self.root = current.remove()\n return True\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
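The records above and below repeat the same AVL tree source with more and more of it replaced by abstraction tokens; the earliest, fully expanded version defines a Node class plus an AVLTree wrapper with insert, find, remove, in-order iteration and segment_sum. As a quick orientation aid (not part of the dataset itself), here is a minimal usage sketch of that wrapper; the module name avl_tree is a hypothetical stand-in for wherever the listing is saved.

# Minimal usage sketch for the AVLTree class from the full listing above.
# The module name avl_tree is an assumption made only for illustration.
from avl_tree import AVLTree

tree = AVLTree()
for key in (5, 1, 9, 3, 7):
    tree.insert(key)            # True for a new key, False for a duplicate

assert tree.size == 5
assert tree.find(7) and not tree.find(4)
assert list(tree) == [1, 3, 5, 7, 9]   # in-order iteration yields sorted keys

# segment_sum(left, right) adds up every stored key in [left, right]: 3 + 5 + 7.
assert tree.segment_sum(2, 8) == 15

tree.remove(5)                  # removal rebalances and refreshes cached sizes/sums
assert tree.size == 4 and tree.sum == 20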
"<import token>\n\n\nclass Node:\n <docstring token>\n\n def __init__(self, key: int):\n self.key: int = key\n self.sum: int = self.key\n self.height: int = 1\n self.size: int = 1\n self.balance_factor: int = 0\n self.parent: Union[Node, None] = None\n self.left: Union[Node, None] = None\n self.right: Union[Node, None] = None\n\n def __repr__(self):\n return (\n f'Node: key={self.key}, height={self.height}, balance_factor={self.balance_factor}, size={self.size}, sum={self.sum})'\n )\n\n def __iter__(self):\n \"\"\"In-order traversal of subtree.\"\"\"\n if self.left is not None:\n yield from self.left\n yield self\n if self.right is not None:\n yield from self.right\n\n def __getitem__(self, item: int, base: int=1) ->'Node':\n \"\"\"Access element by index.\"\"\"\n item += base ^ 1\n left_size = self.left.size if self.left is not None else 0\n if 0 < item < left_size + 1:\n return self.left.__getitem__(item)\n elif item == left_size + 1:\n return self\n elif left_size + 1 < item <= self.size:\n return self.right.__getitem__(item - left_size - 1)\n else:\n raise IndexError(f'Index out of range: {item - base ^ 1}')\n <function token>\n\n def __update_height(self):\n left_height = self.left.height if self.left is not None else 0\n right_height = self.right.height if self.right is not None else 0\n self.height = 1 + max(left_height, right_height)\n self.balance_factor = right_height - left_height\n\n def __update_sum(self):\n left_sum = self.left.sum if self.left is not None else 0\n right_sum = self.right.sum if self.right is not None else 0\n self.sum = self.key + left_sum + right_sum\n\n def __update_size(self):\n left_size = self.left.size if self.left is not None else 0\n right_size = self.right.size if self.right is not None else 0\n self.size = 1 + left_size + right_size\n\n def __update_all(self):\n self.__update_height()\n self.__update_size()\n self.__update_sum()\n\n def __rotate_left(self) ->'Node':\n \"\"\"Perform a left AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.right\n self.right = lower.left\n if self.right is not None:\n self.right.parent = self\n self.__update_all()\n parent = self.parent\n lower.left = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __rotate_right(self) ->'Node':\n \"\"\"Perform a right AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.left\n self.left = lower.right\n if self.left is not None:\n self.left.parent = self\n self.__update_all()\n parent = self.parent\n lower.right = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __balance(self) ->'Node':\n \"\"\"Balance tree starting from current node. 
Return root node of the whole tree.\"\"\"\n current = self\n while True:\n current.__update_height()\n if current.balance_factor == 2:\n middle = current.right\n if middle.balance_factor < 0:\n middle.__rotate_right()\n current = current.__rotate_left()\n elif current.balance_factor == -2:\n middle = current.left\n if middle.balance_factor > 0:\n middle.__rotate_left()\n current = current.__rotate_right()\n else:\n current.__update_size()\n current.__update_sum()\n if current.parent is None:\n return current\n current = current.parent\n\n def add_left(self, key: int) ->'Node':\n \"\"\"Add left child, return new root. Raises an exception when such child exists.\"\"\"\n if self.left is None:\n if key >= self.key:\n raise ValueError(\"Key isn't less than parent's key\")\n self.left = Node(key=key)\n self.left.parent = self\n else:\n raise ValueError('Left child exists')\n return self.__balance()\n\n def add_right(self, key: int) ->'Node':\n \"\"\"Add right child, return new root. Raises an exception when such child exists.\"\"\"\n if self.right is None:\n if key <= self.key:\n raise ValueError(\"Key isn't bigger than parent's key\")\n self.right = Node(key=key)\n self.right.parent = self\n else:\n raise ValueError('Right child exists')\n return self.__balance()\n\n def min(self) ->'Node':\n \"\"\"Return element with minimal key in this subtree.\"\"\"\n current = self\n while current.left is not None:\n current = current.left\n return current\n\n def max(self) ->'Node':\n \"\"\"Return element with maximal key in this subtree.\"\"\"\n current = self\n while current.right is not None:\n current = current.right\n return current\n\n def predecessor(self) ->Union['Node', None]:\n \"\"\"Return element with previous key.\"\"\"\n if self.left is not None:\n return self.left.max()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.right:\n return current.parent\n else:\n current = current.parent\n return None\n\n def successor(self) ->Union['Node', None]:\n \"\"\"Return element with next key.\"\"\"\n if self.right is not None:\n return self.right.min()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.left:\n return current.parent\n else:\n current = current.parent\n return None\n\n def find(self, key) ->Union['Node', None]:\n \"\"\"Return element with such key if present, otherwise None.\"\"\"\n current = self\n while key != current.key:\n if key < current.key:\n current = current.left\n elif key > current.key:\n current = current.right\n if current is None:\n break\n return current\n\n def remove(self):\n \"\"\"Remove element from the tree, update links of adjacent nodes.\"\"\"\n parent = self.parent\n if self.left is None and self.right is None:\n if parent is None:\n return None\n elif parent.left is self:\n parent.left = None\n elif parent.right is self:\n parent.right = None\n elif self.left is not None and self.right is None:\n left = self.left\n left.parent = parent\n self.left = None\n if parent is None:\n return left\n elif parent.left is self:\n parent.left = left\n elif parent.right is self:\n parent.right = left\n elif self.left is None and self.right is not None:\n right = self.right\n right.parent = parent\n self.right = None\n if parent is None:\n return right\n elif parent.left is self:\n parent.left = right\n elif parent.right is self:\n parent.right = right\n else:\n successor = self.successor()\n self.key = successor.key\n return successor.remove()\n self.parent = None\n return parent.__balance()\n\n 
def split_sums(self, key: int) ->Tuple[int, int]:\n \"\"\"Compute sum of all keys less than this one,\n sum of all keys greater than this one and then return them.\"\"\"\n current = self\n less, greater = 0, 0\n while key != current.key:\n if key < current.key:\n greater += (current.right.sum if current.right is not None else\n 0)\n greater += current.key\n current = current.left\n elif key > current.key:\n less += current.left.sum if current.left is not None else 0\n less += current.key\n current = current.right\n if current is None:\n break\n if current is not None:\n less += current.left.sum if current.left is not None else 0\n greater += current.right.sum if current.right is not None else 0\n return less, greater\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) ->int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n\n @property\n def size(self) ->int:\n \"\"\"Return the number of nodes in the tree.\"\"\"\n return self.root.size if not self.empty() else 0\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n\n @property\n def height(self) ->int:\n \"\"\"Return the height of the tree.\"\"\"\n return self.root.height if not self.empty() else 0\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n\n def remove(self, key: int) ->bool:\n \"\"\"Remove element with such key if it exists in the tree (return True),\n or return False otherwise.\"\"\"\n current = self.root.find(key) if not self.empty() else None\n if current is None:\n return False\n self.root = current.remove()\n return True\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
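The __balance loop in these records walks from the modified node back to the root, re-checking the balance factor and rotating wherever it reaches ±2, which is what keeps lookups logarithmic even for adversarial insertion orders. A small, hypothetical sanity check (again assuming the listing is importable as avl_tree) that ascending inserts stay well within the AVL height bound:

# Hypothetical sanity check: ascending inserts are the worst case for a plain
# BST, but the AVL rotations keep the height close to log2(n).
import math
from avl_tree import AVLTree    # assumed module name, illustration only

tree = AVLTree()
n = 1000
for key in range(1, n + 1):
    tree.insert(key)

assert tree.size == n
# The theoretical AVL bound is roughly 1.44 * log2(n + 2); use a looser check here.
assert tree.height <= 2 * math.log2(n + 1)
print(tree.height)              # stays near log2(n), versus 1000 for a degenerate BST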
"<import token>\n\n\nclass Node:\n <docstring token>\n\n def __init__(self, key: int):\n self.key: int = key\n self.sum: int = self.key\n self.height: int = 1\n self.size: int = 1\n self.balance_factor: int = 0\n self.parent: Union[Node, None] = None\n self.left: Union[Node, None] = None\n self.right: Union[Node, None] = None\n\n def __repr__(self):\n return (\n f'Node: key={self.key}, height={self.height}, balance_factor={self.balance_factor}, size={self.size}, sum={self.sum})'\n )\n\n def __iter__(self):\n \"\"\"In-order traversal of subtree.\"\"\"\n if self.left is not None:\n yield from self.left\n yield self\n if self.right is not None:\n yield from self.right\n\n def __getitem__(self, item: int, base: int=1) ->'Node':\n \"\"\"Access element by index.\"\"\"\n item += base ^ 1\n left_size = self.left.size if self.left is not None else 0\n if 0 < item < left_size + 1:\n return self.left.__getitem__(item)\n elif item == left_size + 1:\n return self\n elif left_size + 1 < item <= self.size:\n return self.right.__getitem__(item - left_size - 1)\n else:\n raise IndexError(f'Index out of range: {item - base ^ 1}')\n <function token>\n\n def __update_height(self):\n left_height = self.left.height if self.left is not None else 0\n right_height = self.right.height if self.right is not None else 0\n self.height = 1 + max(left_height, right_height)\n self.balance_factor = right_height - left_height\n\n def __update_sum(self):\n left_sum = self.left.sum if self.left is not None else 0\n right_sum = self.right.sum if self.right is not None else 0\n self.sum = self.key + left_sum + right_sum\n\n def __update_size(self):\n left_size = self.left.size if self.left is not None else 0\n right_size = self.right.size if self.right is not None else 0\n self.size = 1 + left_size + right_size\n\n def __update_all(self):\n self.__update_height()\n self.__update_size()\n self.__update_sum()\n\n def __rotate_left(self) ->'Node':\n \"\"\"Perform a left AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.right\n self.right = lower.left\n if self.right is not None:\n self.right.parent = self\n self.__update_all()\n parent = self.parent\n lower.left = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __rotate_right(self) ->'Node':\n \"\"\"Perform a right AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.left\n self.left = lower.right\n if self.left is not None:\n self.left.parent = self\n self.__update_all()\n parent = self.parent\n lower.right = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __balance(self) ->'Node':\n \"\"\"Balance tree starting from current node. 
Return root node of the whole tree.\"\"\"\n current = self\n while True:\n current.__update_height()\n if current.balance_factor == 2:\n middle = current.right\n if middle.balance_factor < 0:\n middle.__rotate_right()\n current = current.__rotate_left()\n elif current.balance_factor == -2:\n middle = current.left\n if middle.balance_factor > 0:\n middle.__rotate_left()\n current = current.__rotate_right()\n else:\n current.__update_size()\n current.__update_sum()\n if current.parent is None:\n return current\n current = current.parent\n <function token>\n\n def add_right(self, key: int) ->'Node':\n \"\"\"Add right child, return new root. Raises an exception when such child exists.\"\"\"\n if self.right is None:\n if key <= self.key:\n raise ValueError(\"Key isn't bigger than parent's key\")\n self.right = Node(key=key)\n self.right.parent = self\n else:\n raise ValueError('Right child exists')\n return self.__balance()\n\n def min(self) ->'Node':\n \"\"\"Return element with minimal key in this subtree.\"\"\"\n current = self\n while current.left is not None:\n current = current.left\n return current\n\n def max(self) ->'Node':\n \"\"\"Return element with maximal key in this subtree.\"\"\"\n current = self\n while current.right is not None:\n current = current.right\n return current\n\n def predecessor(self) ->Union['Node', None]:\n \"\"\"Return element with previous key.\"\"\"\n if self.left is not None:\n return self.left.max()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.right:\n return current.parent\n else:\n current = current.parent\n return None\n\n def successor(self) ->Union['Node', None]:\n \"\"\"Return element with next key.\"\"\"\n if self.right is not None:\n return self.right.min()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.left:\n return current.parent\n else:\n current = current.parent\n return None\n\n def find(self, key) ->Union['Node', None]:\n \"\"\"Return element with such key if present, otherwise None.\"\"\"\n current = self\n while key != current.key:\n if key < current.key:\n current = current.left\n elif key > current.key:\n current = current.right\n if current is None:\n break\n return current\n\n def remove(self):\n \"\"\"Remove element from the tree, update links of adjacent nodes.\"\"\"\n parent = self.parent\n if self.left is None and self.right is None:\n if parent is None:\n return None\n elif parent.left is self:\n parent.left = None\n elif parent.right is self:\n parent.right = None\n elif self.left is not None and self.right is None:\n left = self.left\n left.parent = parent\n self.left = None\n if parent is None:\n return left\n elif parent.left is self:\n parent.left = left\n elif parent.right is self:\n parent.right = left\n elif self.left is None and self.right is not None:\n right = self.right\n right.parent = parent\n self.right = None\n if parent is None:\n return right\n elif parent.left is self:\n parent.left = right\n elif parent.right is self:\n parent.right = right\n else:\n successor = self.successor()\n self.key = successor.key\n return successor.remove()\n self.parent = None\n return parent.__balance()\n\n def split_sums(self, key: int) ->Tuple[int, int]:\n \"\"\"Compute sum of all keys less than this one,\n sum of all keys greater than this one and then return them.\"\"\"\n current = self\n less, greater = 0, 0\n while key != current.key:\n if key < current.key:\n greater += (current.right.sum if current.right is not None else\n 0)\n greater += 
current.key\n current = current.left\n elif key > current.key:\n less += current.left.sum if current.left is not None else 0\n less += current.key\n current = current.right\n if current is None:\n break\n if current is not None:\n less += current.left.sum if current.left is not None else 0\n greater += current.right.sum if current.right is not None else 0\n return less, greater\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) ->int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n\n @property\n def size(self) ->int:\n \"\"\"Return the number of nodes in the tree.\"\"\"\n return self.root.size if not self.empty() else 0\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n\n @property\n def height(self) ->int:\n \"\"\"Return the height of the tree.\"\"\"\n return self.root.height if not self.empty() else 0\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n\n def remove(self, key: int) ->bool:\n \"\"\"Remove element with such key if it exists in the tree (return True),\n or return False otherwise.\"\"\"\n current = self.root.find(key) if not self.empty() else None\n if current is None:\n return False\n self.root = current.remove()\n return True\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
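Because every node caches its subtree size, the __getitem__ method in the full listing can answer "what is the k-th smallest key?" without walking the whole tree: it compares the requested index against the left subtree's size and descends into one side. A short, hypothetical illustration follows (module name avl_tree assumed as before); note that indexing is 1-based under the listing's default base=1.

# Hypothetical illustration of the size-based order-statistic lookup.
from avl_tree import AVLTree    # assumed module name

tree = AVLTree()
for key in (40, 10, 30, 20, 50):
    tree.insert(key)

# In-order, the stored keys are 10, 20, 30, 40, 50.
assert tree[1] == 10            # smallest key
assert tree[3] == 30            # median
assert tree[5] == 50            # largest key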
"<import token>\n\n\nclass Node:\n <docstring token>\n\n def __init__(self, key: int):\n self.key: int = key\n self.sum: int = self.key\n self.height: int = 1\n self.size: int = 1\n self.balance_factor: int = 0\n self.parent: Union[Node, None] = None\n self.left: Union[Node, None] = None\n self.right: Union[Node, None] = None\n\n def __repr__(self):\n return (\n f'Node: key={self.key}, height={self.height}, balance_factor={self.balance_factor}, size={self.size}, sum={self.sum})'\n )\n\n def __iter__(self):\n \"\"\"In-order traversal of subtree.\"\"\"\n if self.left is not None:\n yield from self.left\n yield self\n if self.right is not None:\n yield from self.right\n\n def __getitem__(self, item: int, base: int=1) ->'Node':\n \"\"\"Access element by index.\"\"\"\n item += base ^ 1\n left_size = self.left.size if self.left is not None else 0\n if 0 < item < left_size + 1:\n return self.left.__getitem__(item)\n elif item == left_size + 1:\n return self\n elif left_size + 1 < item <= self.size:\n return self.right.__getitem__(item - left_size - 1)\n else:\n raise IndexError(f'Index out of range: {item - base ^ 1}')\n <function token>\n\n def __update_height(self):\n left_height = self.left.height if self.left is not None else 0\n right_height = self.right.height if self.right is not None else 0\n self.height = 1 + max(left_height, right_height)\n self.balance_factor = right_height - left_height\n\n def __update_sum(self):\n left_sum = self.left.sum if self.left is not None else 0\n right_sum = self.right.sum if self.right is not None else 0\n self.sum = self.key + left_sum + right_sum\n\n def __update_size(self):\n left_size = self.left.size if self.left is not None else 0\n right_size = self.right.size if self.right is not None else 0\n self.size = 1 + left_size + right_size\n\n def __update_all(self):\n self.__update_height()\n self.__update_size()\n self.__update_sum()\n\n def __rotate_left(self) ->'Node':\n \"\"\"Perform a left AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.right\n self.right = lower.left\n if self.right is not None:\n self.right.parent = self\n self.__update_all()\n parent = self.parent\n lower.left = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __rotate_right(self) ->'Node':\n \"\"\"Perform a right AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.left\n self.left = lower.right\n if self.left is not None:\n self.left.parent = self\n self.__update_all()\n parent = self.parent\n lower.right = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __balance(self) ->'Node':\n \"\"\"Balance tree starting from current node. 
Return root node of the whole tree.\"\"\"\n current = self\n while True:\n current.__update_height()\n if current.balance_factor == 2:\n middle = current.right\n if middle.balance_factor < 0:\n middle.__rotate_right()\n current = current.__rotate_left()\n elif current.balance_factor == -2:\n middle = current.left\n if middle.balance_factor > 0:\n middle.__rotate_left()\n current = current.__rotate_right()\n else:\n current.__update_size()\n current.__update_sum()\n if current.parent is None:\n return current\n current = current.parent\n <function token>\n\n def add_right(self, key: int) ->'Node':\n \"\"\"Add right child, return new root. Raises an exception when such child exists.\"\"\"\n if self.right is None:\n if key <= self.key:\n raise ValueError(\"Key isn't bigger than parent's key\")\n self.right = Node(key=key)\n self.right.parent = self\n else:\n raise ValueError('Right child exists')\n return self.__balance()\n\n def min(self) ->'Node':\n \"\"\"Return element with minimal key in this subtree.\"\"\"\n current = self\n while current.left is not None:\n current = current.left\n return current\n <function token>\n\n def predecessor(self) ->Union['Node', None]:\n \"\"\"Return element with previous key.\"\"\"\n if self.left is not None:\n return self.left.max()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.right:\n return current.parent\n else:\n current = current.parent\n return None\n\n def successor(self) ->Union['Node', None]:\n \"\"\"Return element with next key.\"\"\"\n if self.right is not None:\n return self.right.min()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.left:\n return current.parent\n else:\n current = current.parent\n return None\n\n def find(self, key) ->Union['Node', None]:\n \"\"\"Return element with such key if present, otherwise None.\"\"\"\n current = self\n while key != current.key:\n if key < current.key:\n current = current.left\n elif key > current.key:\n current = current.right\n if current is None:\n break\n return current\n\n def remove(self):\n \"\"\"Remove element from the tree, update links of adjacent nodes.\"\"\"\n parent = self.parent\n if self.left is None and self.right is None:\n if parent is None:\n return None\n elif parent.left is self:\n parent.left = None\n elif parent.right is self:\n parent.right = None\n elif self.left is not None and self.right is None:\n left = self.left\n left.parent = parent\n self.left = None\n if parent is None:\n return left\n elif parent.left is self:\n parent.left = left\n elif parent.right is self:\n parent.right = left\n elif self.left is None and self.right is not None:\n right = self.right\n right.parent = parent\n self.right = None\n if parent is None:\n return right\n elif parent.left is self:\n parent.left = right\n elif parent.right is self:\n parent.right = right\n else:\n successor = self.successor()\n self.key = successor.key\n return successor.remove()\n self.parent = None\n return parent.__balance()\n\n def split_sums(self, key: int) ->Tuple[int, int]:\n \"\"\"Compute sum of all keys less than this one,\n sum of all keys greater than this one and then return them.\"\"\"\n current = self\n less, greater = 0, 0\n while key != current.key:\n if key < current.key:\n greater += (current.right.sum if current.right is not None else\n 0)\n greater += current.key\n current = current.left\n elif key > current.key:\n less += current.left.sum if current.left is not None else 0\n less += current.key\n current = 
current.right\n if current is None:\n break\n if current is not None:\n less += current.left.sum if current.left is not None else 0\n greater += current.right.sum if current.right is not None else 0\n return less, greater\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) ->int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n\n @property\n def size(self) ->int:\n \"\"\"Return the number of nodes in the tree.\"\"\"\n return self.root.size if not self.empty() else 0\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n\n @property\n def height(self) ->int:\n \"\"\"Return the height of the tree.\"\"\"\n return self.root.height if not self.empty() else 0\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n\n def remove(self, key: int) ->bool:\n \"\"\"Remove element with such key if it exists in the tree (return True),\n or return False otherwise.\"\"\"\n current = self.root.find(key) if not self.empty() else None\n if current is None:\n return False\n self.root = current.remove()\n return True\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
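The predecessor and successor methods fall back on parent links whenever the relevant subtree is missing, so in-order neighbours can be reached from any node handle returned by Node.find. A last hypothetical sketch against the fully expanded listing (the earliest records, where min and max are still present), again assuming it is importable as avl_tree:

# Hypothetical sketch of in-order navigation via predecessor()/successor().
from avl_tree import AVLTree    # assumed module name

tree = AVLTree()
for key in (8, 2, 6, 4, 10):
    tree.insert(key)

node = tree.root.find(6)
assert node.predecessor().key == 4        # largest key below 6
assert node.successor().key == 8          # smallest key above 6
assert tree.root.min().key == 2
assert tree.root.find(2).predecessor() is None   # 2 is the minimum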
"<import token>\n\n\nclass Node:\n <docstring token>\n\n def __init__(self, key: int):\n self.key: int = key\n self.sum: int = self.key\n self.height: int = 1\n self.size: int = 1\n self.balance_factor: int = 0\n self.parent: Union[Node, None] = None\n self.left: Union[Node, None] = None\n self.right: Union[Node, None] = None\n\n def __repr__(self):\n return (\n f'Node: key={self.key}, height={self.height}, balance_factor={self.balance_factor}, size={self.size}, sum={self.sum})'\n )\n\n def __iter__(self):\n \"\"\"In-order traversal of subtree.\"\"\"\n if self.left is not None:\n yield from self.left\n yield self\n if self.right is not None:\n yield from self.right\n\n def __getitem__(self, item: int, base: int=1) ->'Node':\n \"\"\"Access element by index.\"\"\"\n item += base ^ 1\n left_size = self.left.size if self.left is not None else 0\n if 0 < item < left_size + 1:\n return self.left.__getitem__(item)\n elif item == left_size + 1:\n return self\n elif left_size + 1 < item <= self.size:\n return self.right.__getitem__(item - left_size - 1)\n else:\n raise IndexError(f'Index out of range: {item - base ^ 1}')\n <function token>\n\n def __update_height(self):\n left_height = self.left.height if self.left is not None else 0\n right_height = self.right.height if self.right is not None else 0\n self.height = 1 + max(left_height, right_height)\n self.balance_factor = right_height - left_height\n\n def __update_sum(self):\n left_sum = self.left.sum if self.left is not None else 0\n right_sum = self.right.sum if self.right is not None else 0\n self.sum = self.key + left_sum + right_sum\n\n def __update_size(self):\n left_size = self.left.size if self.left is not None else 0\n right_size = self.right.size if self.right is not None else 0\n self.size = 1 + left_size + right_size\n\n def __update_all(self):\n self.__update_height()\n self.__update_size()\n self.__update_sum()\n\n def __rotate_left(self) ->'Node':\n \"\"\"Perform a left AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.right\n self.right = lower.left\n if self.right is not None:\n self.right.parent = self\n self.__update_all()\n parent = self.parent\n lower.left = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __rotate_right(self) ->'Node':\n \"\"\"Perform a right AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.left\n self.left = lower.right\n if self.left is not None:\n self.left.parent = self\n self.__update_all()\n parent = self.parent\n lower.right = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __balance(self) ->'Node':\n \"\"\"Balance tree starting from current node. 
Return root node of the whole tree.\"\"\"\n current = self\n while True:\n current.__update_height()\n if current.balance_factor == 2:\n middle = current.right\n if middle.balance_factor < 0:\n middle.__rotate_right()\n current = current.__rotate_left()\n elif current.balance_factor == -2:\n middle = current.left\n if middle.balance_factor > 0:\n middle.__rotate_left()\n current = current.__rotate_right()\n else:\n current.__update_size()\n current.__update_sum()\n if current.parent is None:\n return current\n current = current.parent\n <function token>\n\n def add_right(self, key: int) ->'Node':\n \"\"\"Add right child, return new root. Raises an exception when such child exists.\"\"\"\n if self.right is None:\n if key <= self.key:\n raise ValueError(\"Key isn't bigger than parent's key\")\n self.right = Node(key=key)\n self.right.parent = self\n else:\n raise ValueError('Right child exists')\n return self.__balance()\n\n def min(self) ->'Node':\n \"\"\"Return element with minimal key in this subtree.\"\"\"\n current = self\n while current.left is not None:\n current = current.left\n return current\n <function token>\n\n def predecessor(self) ->Union['Node', None]:\n \"\"\"Return element with previous key.\"\"\"\n if self.left is not None:\n return self.left.max()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.right:\n return current.parent\n else:\n current = current.parent\n return None\n\n def successor(self) ->Union['Node', None]:\n \"\"\"Return element with next key.\"\"\"\n if self.right is not None:\n return self.right.min()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.left:\n return current.parent\n else:\n current = current.parent\n return None\n\n def find(self, key) ->Union['Node', None]:\n \"\"\"Return element with such key if present, otherwise None.\"\"\"\n current = self\n while key != current.key:\n if key < current.key:\n current = current.left\n elif key > current.key:\n current = current.right\n if current is None:\n break\n return current\n <function token>\n\n def split_sums(self, key: int) ->Tuple[int, int]:\n \"\"\"Compute sum of all keys less than this one,\n sum of all keys greater than this one and then return them.\"\"\"\n current = self\n less, greater = 0, 0\n while key != current.key:\n if key < current.key:\n greater += (current.right.sum if current.right is not None else\n 0)\n greater += current.key\n current = current.left\n elif key > current.key:\n less += current.left.sum if current.left is not None else 0\n less += current.key\n current = current.right\n if current is None:\n break\n if current is not None:\n less += current.left.sum if current.left is not None else 0\n greater += current.right.sum if current.right is not None else 0\n return less, greater\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) ->int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n\n @property\n def size(self) ->int:\n \"\"\"Return the number of nodes in the tree.\"\"\"\n return self.root.size if not self.empty() else 0\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n\n 
@property\n def height(self) ->int:\n \"\"\"Return the height of the tree.\"\"\"\n return self.root.height if not self.empty() else 0\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n\n def remove(self, key: int) ->bool:\n \"\"\"Remove element with such key if it exists in the tree (return True),\n or return False otherwise.\"\"\"\n current = self.root.find(key) if not self.empty() else None\n if current is None:\n return False\n self.root = current.remove()\n return True\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
"<import token>\n\n\nclass Node:\n <docstring token>\n\n def __init__(self, key: int):\n self.key: int = key\n self.sum: int = self.key\n self.height: int = 1\n self.size: int = 1\n self.balance_factor: int = 0\n self.parent: Union[Node, None] = None\n self.left: Union[Node, None] = None\n self.right: Union[Node, None] = None\n\n def __repr__(self):\n return (\n f'Node: key={self.key}, height={self.height}, balance_factor={self.balance_factor}, size={self.size}, sum={self.sum})'\n )\n\n def __iter__(self):\n \"\"\"In-order traversal of subtree.\"\"\"\n if self.left is not None:\n yield from self.left\n yield self\n if self.right is not None:\n yield from self.right\n\n def __getitem__(self, item: int, base: int=1) ->'Node':\n \"\"\"Access element by index.\"\"\"\n item += base ^ 1\n left_size = self.left.size if self.left is not None else 0\n if 0 < item < left_size + 1:\n return self.left.__getitem__(item)\n elif item == left_size + 1:\n return self\n elif left_size + 1 < item <= self.size:\n return self.right.__getitem__(item - left_size - 1)\n else:\n raise IndexError(f'Index out of range: {item - base ^ 1}')\n <function token>\n <function token>\n\n def __update_sum(self):\n left_sum = self.left.sum if self.left is not None else 0\n right_sum = self.right.sum if self.right is not None else 0\n self.sum = self.key + left_sum + right_sum\n\n def __update_size(self):\n left_size = self.left.size if self.left is not None else 0\n right_size = self.right.size if self.right is not None else 0\n self.size = 1 + left_size + right_size\n\n def __update_all(self):\n self.__update_height()\n self.__update_size()\n self.__update_sum()\n\n def __rotate_left(self) ->'Node':\n \"\"\"Perform a left AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.right\n self.right = lower.left\n if self.right is not None:\n self.right.parent = self\n self.__update_all()\n parent = self.parent\n lower.left = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __rotate_right(self) ->'Node':\n \"\"\"Perform a right AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.left\n self.left = lower.right\n if self.left is not None:\n self.left.parent = self\n self.__update_all()\n parent = self.parent\n lower.right = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __balance(self) ->'Node':\n \"\"\"Balance tree starting from current node. Return root node of the whole tree.\"\"\"\n current = self\n while True:\n current.__update_height()\n if current.balance_factor == 2:\n middle = current.right\n if middle.balance_factor < 0:\n middle.__rotate_right()\n current = current.__rotate_left()\n elif current.balance_factor == -2:\n middle = current.left\n if middle.balance_factor > 0:\n middle.__rotate_left()\n current = current.__rotate_right()\n else:\n current.__update_size()\n current.__update_sum()\n if current.parent is None:\n return current\n current = current.parent\n <function token>\n\n def add_right(self, key: int) ->'Node':\n \"\"\"Add right child, return new root. 
Raises an exception when such child exists.\"\"\"\n if self.right is None:\n if key <= self.key:\n raise ValueError(\"Key isn't bigger than parent's key\")\n self.right = Node(key=key)\n self.right.parent = self\n else:\n raise ValueError('Right child exists')\n return self.__balance()\n\n def min(self) ->'Node':\n \"\"\"Return element with minimal key in this subtree.\"\"\"\n current = self\n while current.left is not None:\n current = current.left\n return current\n <function token>\n\n def predecessor(self) ->Union['Node', None]:\n \"\"\"Return element with previous key.\"\"\"\n if self.left is not None:\n return self.left.max()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.right:\n return current.parent\n else:\n current = current.parent\n return None\n\n def successor(self) ->Union['Node', None]:\n \"\"\"Return element with next key.\"\"\"\n if self.right is not None:\n return self.right.min()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.left:\n return current.parent\n else:\n current = current.parent\n return None\n\n def find(self, key) ->Union['Node', None]:\n \"\"\"Return element with such key if present, otherwise None.\"\"\"\n current = self\n while key != current.key:\n if key < current.key:\n current = current.left\n elif key > current.key:\n current = current.right\n if current is None:\n break\n return current\n <function token>\n\n def split_sums(self, key: int) ->Tuple[int, int]:\n \"\"\"Compute sum of all keys less than this one,\n sum of all keys greater than this one and then return them.\"\"\"\n current = self\n less, greater = 0, 0\n while key != current.key:\n if key < current.key:\n greater += (current.right.sum if current.right is not None else\n 0)\n greater += current.key\n current = current.left\n elif key > current.key:\n less += current.left.sum if current.left is not None else 0\n less += current.key\n current = current.right\n if current is None:\n break\n if current is not None:\n less += current.left.sum if current.left is not None else 0\n greater += current.right.sum if current.right is not None else 0\n return less, greater\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) ->int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n\n @property\n def size(self) ->int:\n \"\"\"Return the number of nodes in the tree.\"\"\"\n return self.root.size if not self.empty() else 0\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n\n @property\n def height(self) ->int:\n \"\"\"Return the height of the tree.\"\"\"\n return self.root.height if not self.empty() else 0\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is 
not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n\n def remove(self, key: int) ->bool:\n \"\"\"Remove element with such key if it exists in the tree (return True),\n or return False otherwise.\"\"\"\n current = self.root.find(key) if not self.empty() else None\n if current is None:\n return False\n self.root = current.remove()\n return True\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
"<import token>\n\n\nclass Node:\n <docstring token>\n\n def __init__(self, key: int):\n self.key: int = key\n self.sum: int = self.key\n self.height: int = 1\n self.size: int = 1\n self.balance_factor: int = 0\n self.parent: Union[Node, None] = None\n self.left: Union[Node, None] = None\n self.right: Union[Node, None] = None\n\n def __repr__(self):\n return (\n f'Node: key={self.key}, height={self.height}, balance_factor={self.balance_factor}, size={self.size}, sum={self.sum})'\n )\n\n def __iter__(self):\n \"\"\"In-order traversal of subtree.\"\"\"\n if self.left is not None:\n yield from self.left\n yield self\n if self.right is not None:\n yield from self.right\n\n def __getitem__(self, item: int, base: int=1) ->'Node':\n \"\"\"Access element by index.\"\"\"\n item += base ^ 1\n left_size = self.left.size if self.left is not None else 0\n if 0 < item < left_size + 1:\n return self.left.__getitem__(item)\n elif item == left_size + 1:\n return self\n elif left_size + 1 < item <= self.size:\n return self.right.__getitem__(item - left_size - 1)\n else:\n raise IndexError(f'Index out of range: {item - base ^ 1}')\n <function token>\n <function token>\n\n def __update_sum(self):\n left_sum = self.left.sum if self.left is not None else 0\n right_sum = self.right.sum if self.right is not None else 0\n self.sum = self.key + left_sum + right_sum\n\n def __update_size(self):\n left_size = self.left.size if self.left is not None else 0\n right_size = self.right.size if self.right is not None else 0\n self.size = 1 + left_size + right_size\n\n def __update_all(self):\n self.__update_height()\n self.__update_size()\n self.__update_sum()\n\n def __rotate_left(self) ->'Node':\n \"\"\"Perform a left AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.right\n self.right = lower.left\n if self.right is not None:\n self.right.parent = self\n self.__update_all()\n parent = self.parent\n lower.left = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __rotate_right(self) ->'Node':\n \"\"\"Perform a right AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.left\n self.left = lower.right\n if self.left is not None:\n self.left.parent = self\n self.__update_all()\n parent = self.parent\n lower.right = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __balance(self) ->'Node':\n \"\"\"Balance tree starting from current node. Return root node of the whole tree.\"\"\"\n current = self\n while True:\n current.__update_height()\n if current.balance_factor == 2:\n middle = current.right\n if middle.balance_factor < 0:\n middle.__rotate_right()\n current = current.__rotate_left()\n elif current.balance_factor == -2:\n middle = current.left\n if middle.balance_factor > 0:\n middle.__rotate_left()\n current = current.__rotate_right()\n else:\n current.__update_size()\n current.__update_sum()\n if current.parent is None:\n return current\n current = current.parent\n <function token>\n\n def add_right(self, key: int) ->'Node':\n \"\"\"Add right child, return new root. 
Raises an exception when such child exists.\"\"\"\n if self.right is None:\n if key <= self.key:\n raise ValueError(\"Key isn't bigger than parent's key\")\n self.right = Node(key=key)\n self.right.parent = self\n else:\n raise ValueError('Right child exists')\n return self.__balance()\n\n def min(self) ->'Node':\n \"\"\"Return element with minimal key in this subtree.\"\"\"\n current = self\n while current.left is not None:\n current = current.left\n return current\n <function token>\n\n def predecessor(self) ->Union['Node', None]:\n \"\"\"Return element with previous key.\"\"\"\n if self.left is not None:\n return self.left.max()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.right:\n return current.parent\n else:\n current = current.parent\n return None\n\n def successor(self) ->Union['Node', None]:\n \"\"\"Return element with next key.\"\"\"\n if self.right is not None:\n return self.right.min()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.left:\n return current.parent\n else:\n current = current.parent\n return None\n\n def find(self, key) ->Union['Node', None]:\n \"\"\"Return element with such key if present, otherwise None.\"\"\"\n current = self\n while key != current.key:\n if key < current.key:\n current = current.left\n elif key > current.key:\n current = current.right\n if current is None:\n break\n return current\n <function token>\n <function token>\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) ->int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n\n @property\n def size(self) ->int:\n \"\"\"Return the number of nodes in the tree.\"\"\"\n return self.root.size if not self.empty() else 0\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n\n @property\n def height(self) ->int:\n \"\"\"Return the height of the tree.\"\"\"\n return self.root.height if not self.empty() else 0\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n\n def remove(self, key: int) ->bool:\n \"\"\"Remove element with such key if it exists in the tree (return True),\n or return False otherwise.\"\"\"\n 
current = self.root.find(key) if not self.empty() else None\n if current is None:\n return False\n self.root = current.remove()\n return True\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
"<import token>\n\n\nclass Node:\n <docstring token>\n\n def __init__(self, key: int):\n self.key: int = key\n self.sum: int = self.key\n self.height: int = 1\n self.size: int = 1\n self.balance_factor: int = 0\n self.parent: Union[Node, None] = None\n self.left: Union[Node, None] = None\n self.right: Union[Node, None] = None\n\n def __repr__(self):\n return (\n f'Node: key={self.key}, height={self.height}, balance_factor={self.balance_factor}, size={self.size}, sum={self.sum})'\n )\n\n def __iter__(self):\n \"\"\"In-order traversal of subtree.\"\"\"\n if self.left is not None:\n yield from self.left\n yield self\n if self.right is not None:\n yield from self.right\n\n def __getitem__(self, item: int, base: int=1) ->'Node':\n \"\"\"Access element by index.\"\"\"\n item += base ^ 1\n left_size = self.left.size if self.left is not None else 0\n if 0 < item < left_size + 1:\n return self.left.__getitem__(item)\n elif item == left_size + 1:\n return self\n elif left_size + 1 < item <= self.size:\n return self.right.__getitem__(item - left_size - 1)\n else:\n raise IndexError(f'Index out of range: {item - base ^ 1}')\n <function token>\n <function token>\n\n def __update_sum(self):\n left_sum = self.left.sum if self.left is not None else 0\n right_sum = self.right.sum if self.right is not None else 0\n self.sum = self.key + left_sum + right_sum\n <function token>\n\n def __update_all(self):\n self.__update_height()\n self.__update_size()\n self.__update_sum()\n\n def __rotate_left(self) ->'Node':\n \"\"\"Perform a left AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.right\n self.right = lower.left\n if self.right is not None:\n self.right.parent = self\n self.__update_all()\n parent = self.parent\n lower.left = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __rotate_right(self) ->'Node':\n \"\"\"Perform a right AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.left\n self.left = lower.right\n if self.left is not None:\n self.left.parent = self\n self.__update_all()\n parent = self.parent\n lower.right = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __balance(self) ->'Node':\n \"\"\"Balance tree starting from current node. Return root node of the whole tree.\"\"\"\n current = self\n while True:\n current.__update_height()\n if current.balance_factor == 2:\n middle = current.right\n if middle.balance_factor < 0:\n middle.__rotate_right()\n current = current.__rotate_left()\n elif current.balance_factor == -2:\n middle = current.left\n if middle.balance_factor > 0:\n middle.__rotate_left()\n current = current.__rotate_right()\n else:\n current.__update_size()\n current.__update_sum()\n if current.parent is None:\n return current\n current = current.parent\n <function token>\n\n def add_right(self, key: int) ->'Node':\n \"\"\"Add right child, return new root. 
Raises an exception when such child exists.\"\"\"\n if self.right is None:\n if key <= self.key:\n raise ValueError(\"Key isn't bigger than parent's key\")\n self.right = Node(key=key)\n self.right.parent = self\n else:\n raise ValueError('Right child exists')\n return self.__balance()\n\n def min(self) ->'Node':\n \"\"\"Return element with minimal key in this subtree.\"\"\"\n current = self\n while current.left is not None:\n current = current.left\n return current\n <function token>\n\n def predecessor(self) ->Union['Node', None]:\n \"\"\"Return element with previous key.\"\"\"\n if self.left is not None:\n return self.left.max()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.right:\n return current.parent\n else:\n current = current.parent\n return None\n\n def successor(self) ->Union['Node', None]:\n \"\"\"Return element with next key.\"\"\"\n if self.right is not None:\n return self.right.min()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.left:\n return current.parent\n else:\n current = current.parent\n return None\n\n def find(self, key) ->Union['Node', None]:\n \"\"\"Return element with such key if present, otherwise None.\"\"\"\n current = self\n while key != current.key:\n if key < current.key:\n current = current.left\n elif key > current.key:\n current = current.right\n if current is None:\n break\n return current\n <function token>\n <function token>\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) ->int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n\n @property\n def size(self) ->int:\n \"\"\"Return the number of nodes in the tree.\"\"\"\n return self.root.size if not self.empty() else 0\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n\n @property\n def height(self) ->int:\n \"\"\"Return the height of the tree.\"\"\"\n return self.root.height if not self.empty() else 0\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n\n def remove(self, key: int) ->bool:\n \"\"\"Remove element with such key if it exists in the tree (return True),\n or return False otherwise.\"\"\"\n 
current = self.root.find(key) if not self.empty() else None\n if current is None:\n return False\n self.root = current.remove()\n return True\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
"<import token>\n\n\nclass Node:\n <docstring token>\n <function token>\n\n def __repr__(self):\n return (\n f'Node: key={self.key}, height={self.height}, balance_factor={self.balance_factor}, size={self.size}, sum={self.sum})'\n )\n\n def __iter__(self):\n \"\"\"In-order traversal of subtree.\"\"\"\n if self.left is not None:\n yield from self.left\n yield self\n if self.right is not None:\n yield from self.right\n\n def __getitem__(self, item: int, base: int=1) ->'Node':\n \"\"\"Access element by index.\"\"\"\n item += base ^ 1\n left_size = self.left.size if self.left is not None else 0\n if 0 < item < left_size + 1:\n return self.left.__getitem__(item)\n elif item == left_size + 1:\n return self\n elif left_size + 1 < item <= self.size:\n return self.right.__getitem__(item - left_size - 1)\n else:\n raise IndexError(f'Index out of range: {item - base ^ 1}')\n <function token>\n <function token>\n\n def __update_sum(self):\n left_sum = self.left.sum if self.left is not None else 0\n right_sum = self.right.sum if self.right is not None else 0\n self.sum = self.key + left_sum + right_sum\n <function token>\n\n def __update_all(self):\n self.__update_height()\n self.__update_size()\n self.__update_sum()\n\n def __rotate_left(self) ->'Node':\n \"\"\"Perform a left AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.right\n self.right = lower.left\n if self.right is not None:\n self.right.parent = self\n self.__update_all()\n parent = self.parent\n lower.left = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __rotate_right(self) ->'Node':\n \"\"\"Perform a right AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.left\n self.left = lower.right\n if self.left is not None:\n self.left.parent = self\n self.__update_all()\n parent = self.parent\n lower.right = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __balance(self) ->'Node':\n \"\"\"Balance tree starting from current node. Return root node of the whole tree.\"\"\"\n current = self\n while True:\n current.__update_height()\n if current.balance_factor == 2:\n middle = current.right\n if middle.balance_factor < 0:\n middle.__rotate_right()\n current = current.__rotate_left()\n elif current.balance_factor == -2:\n middle = current.left\n if middle.balance_factor > 0:\n middle.__rotate_left()\n current = current.__rotate_right()\n else:\n current.__update_size()\n current.__update_sum()\n if current.parent is None:\n return current\n current = current.parent\n <function token>\n\n def add_right(self, key: int) ->'Node':\n \"\"\"Add right child, return new root. 
Raises an exception when such child exists.\"\"\"\n if self.right is None:\n if key <= self.key:\n raise ValueError(\"Key isn't bigger than parent's key\")\n self.right = Node(key=key)\n self.right.parent = self\n else:\n raise ValueError('Right child exists')\n return self.__balance()\n\n def min(self) ->'Node':\n \"\"\"Return element with minimal key in this subtree.\"\"\"\n current = self\n while current.left is not None:\n current = current.left\n return current\n <function token>\n\n def predecessor(self) ->Union['Node', None]:\n \"\"\"Return element with previous key.\"\"\"\n if self.left is not None:\n return self.left.max()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.right:\n return current.parent\n else:\n current = current.parent\n return None\n\n def successor(self) ->Union['Node', None]:\n \"\"\"Return element with next key.\"\"\"\n if self.right is not None:\n return self.right.min()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.left:\n return current.parent\n else:\n current = current.parent\n return None\n\n def find(self, key) ->Union['Node', None]:\n \"\"\"Return element with such key if present, otherwise None.\"\"\"\n current = self\n while key != current.key:\n if key < current.key:\n current = current.left\n elif key > current.key:\n current = current.right\n if current is None:\n break\n return current\n <function token>\n <function token>\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) ->int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n\n @property\n def size(self) ->int:\n \"\"\"Return the number of nodes in the tree.\"\"\"\n return self.root.size if not self.empty() else 0\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n\n @property\n def height(self) ->int:\n \"\"\"Return the height of the tree.\"\"\"\n return self.root.height if not self.empty() else 0\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n\n def remove(self, key: int) ->bool:\n \"\"\"Remove element with such key if it exists in the tree (return True),\n or return False otherwise.\"\"\"\n 
current = self.root.find(key) if not self.empty() else None\n if current is None:\n return False\n self.root = current.remove()\n return True\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
"<import token>\n\n\nclass Node:\n <docstring token>\n <function token>\n\n def __repr__(self):\n return (\n f'Node: key={self.key}, height={self.height}, balance_factor={self.balance_factor}, size={self.size}, sum={self.sum})'\n )\n\n def __iter__(self):\n \"\"\"In-order traversal of subtree.\"\"\"\n if self.left is not None:\n yield from self.left\n yield self\n if self.right is not None:\n yield from self.right\n\n def __getitem__(self, item: int, base: int=1) ->'Node':\n \"\"\"Access element by index.\"\"\"\n item += base ^ 1\n left_size = self.left.size if self.left is not None else 0\n if 0 < item < left_size + 1:\n return self.left.__getitem__(item)\n elif item == left_size + 1:\n return self\n elif left_size + 1 < item <= self.size:\n return self.right.__getitem__(item - left_size - 1)\n else:\n raise IndexError(f'Index out of range: {item - base ^ 1}')\n <function token>\n <function token>\n\n def __update_sum(self):\n left_sum = self.left.sum if self.left is not None else 0\n right_sum = self.right.sum if self.right is not None else 0\n self.sum = self.key + left_sum + right_sum\n <function token>\n\n def __update_all(self):\n self.__update_height()\n self.__update_size()\n self.__update_sum()\n\n def __rotate_left(self) ->'Node':\n \"\"\"Perform a left AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.right\n self.right = lower.left\n if self.right is not None:\n self.right.parent = self\n self.__update_all()\n parent = self.parent\n lower.left = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __rotate_right(self) ->'Node':\n \"\"\"Perform a right AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.left\n self.left = lower.right\n if self.left is not None:\n self.left.parent = self\n self.__update_all()\n parent = self.parent\n lower.right = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __balance(self) ->'Node':\n \"\"\"Balance tree starting from current node. Return root node of the whole tree.\"\"\"\n current = self\n while True:\n current.__update_height()\n if current.balance_factor == 2:\n middle = current.right\n if middle.balance_factor < 0:\n middle.__rotate_right()\n current = current.__rotate_left()\n elif current.balance_factor == -2:\n middle = current.left\n if middle.balance_factor > 0:\n middle.__rotate_left()\n current = current.__rotate_right()\n else:\n current.__update_size()\n current.__update_sum()\n if current.parent is None:\n return current\n current = current.parent\n <function token>\n\n def add_right(self, key: int) ->'Node':\n \"\"\"Add right child, return new root. 
Raises an exception when such child exists.\"\"\"\n if self.right is None:\n if key <= self.key:\n raise ValueError(\"Key isn't bigger than parent's key\")\n self.right = Node(key=key)\n self.right.parent = self\n else:\n raise ValueError('Right child exists')\n return self.__balance()\n <function token>\n <function token>\n\n def predecessor(self) ->Union['Node', None]:\n \"\"\"Return element with previous key.\"\"\"\n if self.left is not None:\n return self.left.max()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.right:\n return current.parent\n else:\n current = current.parent\n return None\n\n def successor(self) ->Union['Node', None]:\n \"\"\"Return element with next key.\"\"\"\n if self.right is not None:\n return self.right.min()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.left:\n return current.parent\n else:\n current = current.parent\n return None\n\n def find(self, key) ->Union['Node', None]:\n \"\"\"Return element with such key if present, otherwise None.\"\"\"\n current = self\n while key != current.key:\n if key < current.key:\n current = current.left\n elif key > current.key:\n current = current.right\n if current is None:\n break\n return current\n <function token>\n <function token>\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) ->int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n\n @property\n def size(self) ->int:\n \"\"\"Return the number of nodes in the tree.\"\"\"\n return self.root.size if not self.empty() else 0\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n\n @property\n def height(self) ->int:\n \"\"\"Return the height of the tree.\"\"\"\n return self.root.height if not self.empty() else 0\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n\n def remove(self, key: int) ->bool:\n \"\"\"Remove element with such key if it exists in the tree (return True),\n or return False otherwise.\"\"\"\n current = self.root.find(key) if not self.empty() else None\n if current is None:\n return False\n self.root = current.remove()\n return True\n\n def 
segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
"<import token>\n\n\nclass Node:\n <docstring token>\n <function token>\n\n def __repr__(self):\n return (\n f'Node: key={self.key}, height={self.height}, balance_factor={self.balance_factor}, size={self.size}, sum={self.sum})'\n )\n\n def __iter__(self):\n \"\"\"In-order traversal of subtree.\"\"\"\n if self.left is not None:\n yield from self.left\n yield self\n if self.right is not None:\n yield from self.right\n\n def __getitem__(self, item: int, base: int=1) ->'Node':\n \"\"\"Access element by index.\"\"\"\n item += base ^ 1\n left_size = self.left.size if self.left is not None else 0\n if 0 < item < left_size + 1:\n return self.left.__getitem__(item)\n elif item == left_size + 1:\n return self\n elif left_size + 1 < item <= self.size:\n return self.right.__getitem__(item - left_size - 1)\n else:\n raise IndexError(f'Index out of range: {item - base ^ 1}')\n <function token>\n <function token>\n\n def __update_sum(self):\n left_sum = self.left.sum if self.left is not None else 0\n right_sum = self.right.sum if self.right is not None else 0\n self.sum = self.key + left_sum + right_sum\n <function token>\n\n def __update_all(self):\n self.__update_height()\n self.__update_size()\n self.__update_sum()\n\n def __rotate_left(self) ->'Node':\n \"\"\"Perform a left AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.right\n self.right = lower.left\n if self.right is not None:\n self.right.parent = self\n self.__update_all()\n parent = self.parent\n lower.left = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __rotate_right(self) ->'Node':\n \"\"\"Perform a right AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.left\n self.left = lower.right\n if self.left is not None:\n self.left.parent = self\n self.__update_all()\n parent = self.parent\n lower.right = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __balance(self) ->'Node':\n \"\"\"Balance tree starting from current node. 
Return root node of the whole tree.\"\"\"\n current = self\n while True:\n current.__update_height()\n if current.balance_factor == 2:\n middle = current.right\n if middle.balance_factor < 0:\n middle.__rotate_right()\n current = current.__rotate_left()\n elif current.balance_factor == -2:\n middle = current.left\n if middle.balance_factor > 0:\n middle.__rotate_left()\n current = current.__rotate_right()\n else:\n current.__update_size()\n current.__update_sum()\n if current.parent is None:\n return current\n current = current.parent\n <function token>\n <function token>\n <function token>\n <function token>\n\n def predecessor(self) ->Union['Node', None]:\n \"\"\"Return element with previous key.\"\"\"\n if self.left is not None:\n return self.left.max()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.right:\n return current.parent\n else:\n current = current.parent\n return None\n\n def successor(self) ->Union['Node', None]:\n \"\"\"Return element with next key.\"\"\"\n if self.right is not None:\n return self.right.min()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.left:\n return current.parent\n else:\n current = current.parent\n return None\n\n def find(self, key) ->Union['Node', None]:\n \"\"\"Return element with such key if present, otherwise None.\"\"\"\n current = self\n while key != current.key:\n if key < current.key:\n current = current.left\n elif key > current.key:\n current = current.right\n if current is None:\n break\n return current\n <function token>\n <function token>\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) ->int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n\n @property\n def size(self) ->int:\n \"\"\"Return the number of nodes in the tree.\"\"\"\n return self.root.size if not self.empty() else 0\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n\n @property\n def height(self) ->int:\n \"\"\"Return the height of the tree.\"\"\"\n return self.root.height if not self.empty() else 0\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n\n def remove(self, key: 
int) ->bool:\n \"\"\"Remove element with such key if it exists in the tree (return True),\n or return False otherwise.\"\"\"\n current = self.root.find(key) if not self.empty() else None\n if current is None:\n return False\n self.root = current.remove()\n return True\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
"<import token>\n\n\nclass Node:\n <docstring token>\n <function token>\n\n def __repr__(self):\n return (\n f'Node: key={self.key}, height={self.height}, balance_factor={self.balance_factor}, size={self.size}, sum={self.sum})'\n )\n\n def __iter__(self):\n \"\"\"In-order traversal of subtree.\"\"\"\n if self.left is not None:\n yield from self.left\n yield self\n if self.right is not None:\n yield from self.right\n\n def __getitem__(self, item: int, base: int=1) ->'Node':\n \"\"\"Access element by index.\"\"\"\n item += base ^ 1\n left_size = self.left.size if self.left is not None else 0\n if 0 < item < left_size + 1:\n return self.left.__getitem__(item)\n elif item == left_size + 1:\n return self\n elif left_size + 1 < item <= self.size:\n return self.right.__getitem__(item - left_size - 1)\n else:\n raise IndexError(f'Index out of range: {item - base ^ 1}')\n <function token>\n <function token>\n\n def __update_sum(self):\n left_sum = self.left.sum if self.left is not None else 0\n right_sum = self.right.sum if self.right is not None else 0\n self.sum = self.key + left_sum + right_sum\n <function token>\n <function token>\n\n def __rotate_left(self) ->'Node':\n \"\"\"Perform a left AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.right\n self.right = lower.left\n if self.right is not None:\n self.right.parent = self\n self.__update_all()\n parent = self.parent\n lower.left = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __rotate_right(self) ->'Node':\n \"\"\"Perform a right AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.left\n self.left = lower.right\n if self.left is not None:\n self.left.parent = self\n self.__update_all()\n parent = self.parent\n lower.right = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __balance(self) ->'Node':\n \"\"\"Balance tree starting from current node. 
Return root node of the whole tree.\"\"\"\n current = self\n while True:\n current.__update_height()\n if current.balance_factor == 2:\n middle = current.right\n if middle.balance_factor < 0:\n middle.__rotate_right()\n current = current.__rotate_left()\n elif current.balance_factor == -2:\n middle = current.left\n if middle.balance_factor > 0:\n middle.__rotate_left()\n current = current.__rotate_right()\n else:\n current.__update_size()\n current.__update_sum()\n if current.parent is None:\n return current\n current = current.parent\n <function token>\n <function token>\n <function token>\n <function token>\n\n def predecessor(self) ->Union['Node', None]:\n \"\"\"Return element with previous key.\"\"\"\n if self.left is not None:\n return self.left.max()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.right:\n return current.parent\n else:\n current = current.parent\n return None\n\n def successor(self) ->Union['Node', None]:\n \"\"\"Return element with next key.\"\"\"\n if self.right is not None:\n return self.right.min()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.left:\n return current.parent\n else:\n current = current.parent\n return None\n\n def find(self, key) ->Union['Node', None]:\n \"\"\"Return element with such key if present, otherwise None.\"\"\"\n current = self\n while key != current.key:\n if key < current.key:\n current = current.left\n elif key > current.key:\n current = current.right\n if current is None:\n break\n return current\n <function token>\n <function token>\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) ->int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n\n @property\n def size(self) ->int:\n \"\"\"Return the number of nodes in the tree.\"\"\"\n return self.root.size if not self.empty() else 0\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n\n @property\n def height(self) ->int:\n \"\"\"Return the height of the tree.\"\"\"\n return self.root.height if not self.empty() else 0\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n\n def remove(self, key: 
int) ->bool:\n \"\"\"Remove element with such key if it exists in the tree (return True),\n or return False otherwise.\"\"\"\n current = self.root.find(key) if not self.empty() else None\n if current is None:\n return False\n self.root = current.remove()\n return True\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
"<import token>\n\n\nclass Node:\n <docstring token>\n <function token>\n\n def __repr__(self):\n return (\n f'Node: key={self.key}, height={self.height}, balance_factor={self.balance_factor}, size={self.size}, sum={self.sum})'\n )\n <function token>\n\n def __getitem__(self, item: int, base: int=1) ->'Node':\n \"\"\"Access element by index.\"\"\"\n item += base ^ 1\n left_size = self.left.size if self.left is not None else 0\n if 0 < item < left_size + 1:\n return self.left.__getitem__(item)\n elif item == left_size + 1:\n return self\n elif left_size + 1 < item <= self.size:\n return self.right.__getitem__(item - left_size - 1)\n else:\n raise IndexError(f'Index out of range: {item - base ^ 1}')\n <function token>\n <function token>\n\n def __update_sum(self):\n left_sum = self.left.sum if self.left is not None else 0\n right_sum = self.right.sum if self.right is not None else 0\n self.sum = self.key + left_sum + right_sum\n <function token>\n <function token>\n\n def __rotate_left(self) ->'Node':\n \"\"\"Perform a left AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.right\n self.right = lower.left\n if self.right is not None:\n self.right.parent = self\n self.__update_all()\n parent = self.parent\n lower.left = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __rotate_right(self) ->'Node':\n \"\"\"Perform a right AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.left\n self.left = lower.right\n if self.left is not None:\n self.left.parent = self\n self.__update_all()\n parent = self.parent\n lower.right = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __balance(self) ->'Node':\n \"\"\"Balance tree starting from current node. 
Return root node of the whole tree.\"\"\"\n current = self\n while True:\n current.__update_height()\n if current.balance_factor == 2:\n middle = current.right\n if middle.balance_factor < 0:\n middle.__rotate_right()\n current = current.__rotate_left()\n elif current.balance_factor == -2:\n middle = current.left\n if middle.balance_factor > 0:\n middle.__rotate_left()\n current = current.__rotate_right()\n else:\n current.__update_size()\n current.__update_sum()\n if current.parent is None:\n return current\n current = current.parent\n <function token>\n <function token>\n <function token>\n <function token>\n\n def predecessor(self) ->Union['Node', None]:\n \"\"\"Return element with previous key.\"\"\"\n if self.left is not None:\n return self.left.max()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.right:\n return current.parent\n else:\n current = current.parent\n return None\n\n def successor(self) ->Union['Node', None]:\n \"\"\"Return element with next key.\"\"\"\n if self.right is not None:\n return self.right.min()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.left:\n return current.parent\n else:\n current = current.parent\n return None\n\n def find(self, key) ->Union['Node', None]:\n \"\"\"Return element with such key if present, otherwise None.\"\"\"\n current = self\n while key != current.key:\n if key < current.key:\n current = current.left\n elif key > current.key:\n current = current.right\n if current is None:\n break\n return current\n <function token>\n <function token>\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) ->int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n\n @property\n def size(self) ->int:\n \"\"\"Return the number of nodes in the tree.\"\"\"\n return self.root.size if not self.empty() else 0\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n\n @property\n def height(self) ->int:\n \"\"\"Return the height of the tree.\"\"\"\n return self.root.height if not self.empty() else 0\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n\n def remove(self, key: 
int) ->bool:\n \"\"\"Remove element with such key if it exists in the tree (return True),\n or return False otherwise.\"\"\"\n current = self.root.find(key) if not self.empty() else None\n if current is None:\n return False\n self.root = current.remove()\n return True\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
"<import token>\n\n\nclass Node:\n <docstring token>\n <function token>\n\n def __repr__(self):\n return (\n f'Node: key={self.key}, height={self.height}, balance_factor={self.balance_factor}, size={self.size}, sum={self.sum})'\n )\n <function token>\n\n def __getitem__(self, item: int, base: int=1) ->'Node':\n \"\"\"Access element by index.\"\"\"\n item += base ^ 1\n left_size = self.left.size if self.left is not None else 0\n if 0 < item < left_size + 1:\n return self.left.__getitem__(item)\n elif item == left_size + 1:\n return self\n elif left_size + 1 < item <= self.size:\n return self.right.__getitem__(item - left_size - 1)\n else:\n raise IndexError(f'Index out of range: {item - base ^ 1}')\n <function token>\n <function token>\n\n def __update_sum(self):\n left_sum = self.left.sum if self.left is not None else 0\n right_sum = self.right.sum if self.right is not None else 0\n self.sum = self.key + left_sum + right_sum\n <function token>\n <function token>\n <function token>\n\n def __rotate_right(self) ->'Node':\n \"\"\"Perform a right AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.left\n self.left = lower.right\n if self.left is not None:\n self.left.parent = self\n self.__update_all()\n parent = self.parent\n lower.right = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __balance(self) ->'Node':\n \"\"\"Balance tree starting from current node. Return root node of the whole tree.\"\"\"\n current = self\n while True:\n current.__update_height()\n if current.balance_factor == 2:\n middle = current.right\n if middle.balance_factor < 0:\n middle.__rotate_right()\n current = current.__rotate_left()\n elif current.balance_factor == -2:\n middle = current.left\n if middle.balance_factor > 0:\n middle.__rotate_left()\n current = current.__rotate_right()\n else:\n current.__update_size()\n current.__update_sum()\n if current.parent is None:\n return current\n current = current.parent\n <function token>\n <function token>\n <function token>\n <function token>\n\n def predecessor(self) ->Union['Node', None]:\n \"\"\"Return element with previous key.\"\"\"\n if self.left is not None:\n return self.left.max()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.right:\n return current.parent\n else:\n current = current.parent\n return None\n\n def successor(self) ->Union['Node', None]:\n \"\"\"Return element with next key.\"\"\"\n if self.right is not None:\n return self.right.min()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.left:\n return current.parent\n else:\n current = current.parent\n return None\n\n def find(self, key) ->Union['Node', None]:\n \"\"\"Return element with such key if present, otherwise None.\"\"\"\n current = self\n while key != current.key:\n if key < current.key:\n current = current.left\n elif key > current.key:\n current = current.right\n if current is None:\n break\n return current\n <function token>\n <function token>\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) ->int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def 
empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n\n @property\n def size(self) ->int:\n \"\"\"Return the number of nodes in the tree.\"\"\"\n return self.root.size if not self.empty() else 0\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n\n @property\n def height(self) ->int:\n \"\"\"Return the height of the tree.\"\"\"\n return self.root.height if not self.empty() else 0\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n\n def remove(self, key: int) ->bool:\n \"\"\"Remove element with such key if it exists in the tree (return True),\n or return False otherwise.\"\"\"\n current = self.root.find(key) if not self.empty() else None\n if current is None:\n return False\n self.root = current.remove()\n return True\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
"<import token>\n\n\nclass Node:\n <docstring token>\n <function token>\n\n def __repr__(self):\n return (\n f'Node: key={self.key}, height={self.height}, balance_factor={self.balance_factor}, size={self.size}, sum={self.sum})'\n )\n <function token>\n\n def __getitem__(self, item: int, base: int=1) ->'Node':\n \"\"\"Access element by index.\"\"\"\n item += base ^ 1\n left_size = self.left.size if self.left is not None else 0\n if 0 < item < left_size + 1:\n return self.left.__getitem__(item)\n elif item == left_size + 1:\n return self\n elif left_size + 1 < item <= self.size:\n return self.right.__getitem__(item - left_size - 1)\n else:\n raise IndexError(f'Index out of range: {item - base ^ 1}')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __rotate_right(self) ->'Node':\n \"\"\"Perform a right AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.left\n self.left = lower.right\n if self.left is not None:\n self.left.parent = self\n self.__update_all()\n parent = self.parent\n lower.right = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __balance(self) ->'Node':\n \"\"\"Balance tree starting from current node. Return root node of the whole tree.\"\"\"\n current = self\n while True:\n current.__update_height()\n if current.balance_factor == 2:\n middle = current.right\n if middle.balance_factor < 0:\n middle.__rotate_right()\n current = current.__rotate_left()\n elif current.balance_factor == -2:\n middle = current.left\n if middle.balance_factor > 0:\n middle.__rotate_left()\n current = current.__rotate_right()\n else:\n current.__update_size()\n current.__update_sum()\n if current.parent is None:\n return current\n current = current.parent\n <function token>\n <function token>\n <function token>\n <function token>\n\n def predecessor(self) ->Union['Node', None]:\n \"\"\"Return element with previous key.\"\"\"\n if self.left is not None:\n return self.left.max()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.right:\n return current.parent\n else:\n current = current.parent\n return None\n\n def successor(self) ->Union['Node', None]:\n \"\"\"Return element with next key.\"\"\"\n if self.right is not None:\n return self.right.min()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.left:\n return current.parent\n else:\n current = current.parent\n return None\n\n def find(self, key) ->Union['Node', None]:\n \"\"\"Return element with such key if present, otherwise None.\"\"\"\n current = self\n while key != current.key:\n if key < current.key:\n current = current.left\n elif key > current.key:\n current = current.right\n if current is None:\n break\n return current\n <function token>\n <function token>\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) ->int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n\n @property\n def size(self) ->int:\n \"\"\"Return the number of 
nodes in the tree.\"\"\"\n return self.root.size if not self.empty() else 0\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n\n @property\n def height(self) ->int:\n \"\"\"Return the height of the tree.\"\"\"\n return self.root.height if not self.empty() else 0\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n\n def remove(self, key: int) ->bool:\n \"\"\"Remove element with such key if it exists in the tree (return True),\n or return False otherwise.\"\"\"\n current = self.root.find(key) if not self.empty() else None\n if current is None:\n return False\n self.root = current.remove()\n return True\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
"<import token>\n\n\nclass Node:\n <docstring token>\n <function token>\n\n def __repr__(self):\n return (\n f'Node: key={self.key}, height={self.height}, balance_factor={self.balance_factor}, size={self.size}, sum={self.sum})'\n )\n <function token>\n\n def __getitem__(self, item: int, base: int=1) ->'Node':\n \"\"\"Access element by index.\"\"\"\n item += base ^ 1\n left_size = self.left.size if self.left is not None else 0\n if 0 < item < left_size + 1:\n return self.left.__getitem__(item)\n elif item == left_size + 1:\n return self\n elif left_size + 1 < item <= self.size:\n return self.right.__getitem__(item - left_size - 1)\n else:\n raise IndexError(f'Index out of range: {item - base ^ 1}')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __rotate_right(self) ->'Node':\n \"\"\"Perform a right AVL rotation on the node. Returns new root of the subtree.\"\"\"\n lower = self.left\n self.left = lower.right\n if self.left is not None:\n self.left.parent = self\n self.__update_all()\n parent = self.parent\n lower.right = self\n self.parent = lower\n lower.__update_all()\n lower.parent = parent\n if parent is not None and parent.left == self:\n parent.left = lower\n elif parent is not None and parent.right == self:\n parent.right = lower\n return lower\n\n def __balance(self) ->'Node':\n \"\"\"Balance tree starting from current node. Return root node of the whole tree.\"\"\"\n current = self\n while True:\n current.__update_height()\n if current.balance_factor == 2:\n middle = current.right\n if middle.balance_factor < 0:\n middle.__rotate_right()\n current = current.__rotate_left()\n elif current.balance_factor == -2:\n middle = current.left\n if middle.balance_factor > 0:\n middle.__rotate_left()\n current = current.__rotate_right()\n else:\n current.__update_size()\n current.__update_sum()\n if current.parent is None:\n return current\n current = current.parent\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def successor(self) ->Union['Node', None]:\n \"\"\"Return element with next key.\"\"\"\n if self.right is not None:\n return self.right.min()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.left:\n return current.parent\n else:\n current = current.parent\n return None\n\n def find(self, key) ->Union['Node', None]:\n \"\"\"Return element with such key if present, otherwise None.\"\"\"\n current = self\n while key != current.key:\n if key < current.key:\n current = current.left\n elif key > current.key:\n current = current.right\n if current is None:\n break\n return current\n <function token>\n <function token>\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) ->int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n\n @property\n def size(self) ->int:\n \"\"\"Return the number of nodes in the tree.\"\"\"\n return self.root.size if not self.empty() else 0\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n\n @property\n def height(self) ->int:\n \"\"\"Return the height of the tree.\"\"\"\n return self.root.height if not 
self.empty() else 0\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n\n def remove(self, key: int) ->bool:\n \"\"\"Remove element with such key if it exists in the tree (return True),\n or return False otherwise.\"\"\"\n current = self.root.find(key) if not self.empty() else None\n if current is None:\n return False\n self.root = current.remove()\n return True\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
"<import token>\n\n\nclass Node:\n <docstring token>\n <function token>\n\n def __repr__(self):\n return (\n f'Node: key={self.key}, height={self.height}, balance_factor={self.balance_factor}, size={self.size}, sum={self.sum})'\n )\n <function token>\n\n def __getitem__(self, item: int, base: int=1) ->'Node':\n \"\"\"Access element by index.\"\"\"\n item += base ^ 1\n left_size = self.left.size if self.left is not None else 0\n if 0 < item < left_size + 1:\n return self.left.__getitem__(item)\n elif item == left_size + 1:\n return self\n elif left_size + 1 < item <= self.size:\n return self.right.__getitem__(item - left_size - 1)\n else:\n raise IndexError(f'Index out of range: {item - base ^ 1}')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __balance(self) ->'Node':\n \"\"\"Balance tree starting from current node. Return root node of the whole tree.\"\"\"\n current = self\n while True:\n current.__update_height()\n if current.balance_factor == 2:\n middle = current.right\n if middle.balance_factor < 0:\n middle.__rotate_right()\n current = current.__rotate_left()\n elif current.balance_factor == -2:\n middle = current.left\n if middle.balance_factor > 0:\n middle.__rotate_left()\n current = current.__rotate_right()\n else:\n current.__update_size()\n current.__update_sum()\n if current.parent is None:\n return current\n current = current.parent\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def successor(self) ->Union['Node', None]:\n \"\"\"Return element with next key.\"\"\"\n if self.right is not None:\n return self.right.min()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.left:\n return current.parent\n else:\n current = current.parent\n return None\n\n def find(self, key) ->Union['Node', None]:\n \"\"\"Return element with such key if present, otherwise None.\"\"\"\n current = self\n while key != current.key:\n if key < current.key:\n current = current.left\n elif key > current.key:\n current = current.right\n if current is None:\n break\n return current\n <function token>\n <function token>\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) ->int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n\n @property\n def size(self) ->int:\n \"\"\"Return the number of nodes in the tree.\"\"\"\n return self.root.size if not self.empty() else 0\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n\n @property\n def height(self) ->int:\n \"\"\"Return the height of the tree.\"\"\"\n return self.root.height if not self.empty() else 0\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return 
False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n\n def remove(self, key: int) ->bool:\n \"\"\"Remove element with such key if it exists in the tree (return True),\n or return False otherwise.\"\"\"\n current = self.root.find(key) if not self.empty() else None\n if current is None:\n return False\n self.root = current.remove()\n return True\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
"<import token>\n\n\nclass Node:\n <docstring token>\n <function token>\n\n def __repr__(self):\n return (\n f'Node: key={self.key}, height={self.height}, balance_factor={self.balance_factor}, size={self.size}, sum={self.sum})'\n )\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __balance(self) ->'Node':\n \"\"\"Balance tree starting from current node. Return root node of the whole tree.\"\"\"\n current = self\n while True:\n current.__update_height()\n if current.balance_factor == 2:\n middle = current.right\n if middle.balance_factor < 0:\n middle.__rotate_right()\n current = current.__rotate_left()\n elif current.balance_factor == -2:\n middle = current.left\n if middle.balance_factor > 0:\n middle.__rotate_left()\n current = current.__rotate_right()\n else:\n current.__update_size()\n current.__update_sum()\n if current.parent is None:\n return current\n current = current.parent\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def successor(self) ->Union['Node', None]:\n \"\"\"Return element with next key.\"\"\"\n if self.right is not None:\n return self.right.min()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.left:\n return current.parent\n else:\n current = current.parent\n return None\n\n def find(self, key) ->Union['Node', None]:\n \"\"\"Return element with such key if present, otherwise None.\"\"\"\n current = self\n while key != current.key:\n if key < current.key:\n current = current.left\n elif key > current.key:\n current = current.right\n if current is None:\n break\n return current\n <function token>\n <function token>\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) ->int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n\n @property\n def size(self) ->int:\n \"\"\"Return the number of nodes in the tree.\"\"\"\n return self.root.size if not self.empty() else 0\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n\n @property\n def height(self) ->int:\n \"\"\"Return the height of the tree.\"\"\"\n return self.root.height if not self.empty() else 0\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n 
elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n\n def remove(self, key: int) ->bool:\n \"\"\"Remove element with such key if it exists in the tree (return True),\n or return False otherwise.\"\"\"\n current = self.root.find(key) if not self.empty() else None\n if current is None:\n return False\n self.root = current.remove()\n return True\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
"<import token>\n\n\nclass Node:\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __balance(self) ->'Node':\n \"\"\"Balance tree starting from current node. Return root node of the whole tree.\"\"\"\n current = self\n while True:\n current.__update_height()\n if current.balance_factor == 2:\n middle = current.right\n if middle.balance_factor < 0:\n middle.__rotate_right()\n current = current.__rotate_left()\n elif current.balance_factor == -2:\n middle = current.left\n if middle.balance_factor > 0:\n middle.__rotate_left()\n current = current.__rotate_right()\n else:\n current.__update_size()\n current.__update_sum()\n if current.parent is None:\n return current\n current = current.parent\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def successor(self) ->Union['Node', None]:\n \"\"\"Return element with next key.\"\"\"\n if self.right is not None:\n return self.right.min()\n else:\n current = self\n while current.parent is not None:\n if current == current.parent.left:\n return current.parent\n else:\n current = current.parent\n return None\n\n def find(self, key) ->Union['Node', None]:\n \"\"\"Return element with such key if present, otherwise None.\"\"\"\n current = self\n while key != current.key:\n if key < current.key:\n current = current.left\n elif key > current.key:\n current = current.right\n if current is None:\n break\n return current\n <function token>\n <function token>\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) ->int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n\n @property\n def size(self) ->int:\n \"\"\"Return the number of nodes in the tree.\"\"\"\n return self.root.size if not self.empty() else 0\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n\n @property\n def height(self) ->int:\n \"\"\"Return the height of the tree.\"\"\"\n return self.root.height if not self.empty() else 0\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = 
current.right\n return False\n\n def remove(self, key: int) ->bool:\n \"\"\"Remove element with such key if it exists in the tree (return True),\n or return False otherwise.\"\"\"\n current = self.root.find(key) if not self.empty() else None\n if current is None:\n return False\n self.root = current.remove()\n return True\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
"<import token>\n\n\nclass Node:\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __balance(self) ->'Node':\n \"\"\"Balance tree starting from current node. Return root node of the whole tree.\"\"\"\n current = self\n while True:\n current.__update_height()\n if current.balance_factor == 2:\n middle = current.right\n if middle.balance_factor < 0:\n middle.__rotate_right()\n current = current.__rotate_left()\n elif current.balance_factor == -2:\n middle = current.left\n if middle.balance_factor > 0:\n middle.__rotate_left()\n current = current.__rotate_right()\n else:\n current.__update_size()\n current.__update_sum()\n if current.parent is None:\n return current\n current = current.parent\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def find(self, key) ->Union['Node', None]:\n \"\"\"Return element with such key if present, otherwise None.\"\"\"\n current = self\n while key != current.key:\n if key < current.key:\n current = current.left\n elif key > current.key:\n current = current.right\n if current is None:\n break\n return current\n <function token>\n <function token>\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) ->int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n\n @property\n def size(self) ->int:\n \"\"\"Return the number of nodes in the tree.\"\"\"\n return self.root.size if not self.empty() else 0\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n\n @property\n def height(self) ->int:\n \"\"\"Return the height of the tree.\"\"\"\n return self.root.height if not self.empty() else 0\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n\n def remove(self, key: int) ->bool:\n \"\"\"Remove element with such key if it exists in the tree (return True),\n or return False otherwise.\"\"\"\n current = self.root.find(key) if not self.empty() else None\n if current is None:\n return False\n self.root = current.remove()\n 
return True\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
"<import token>\n\n\nclass Node:\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def find(self, key) ->Union['Node', None]:\n \"\"\"Return element with such key if present, otherwise None.\"\"\"\n current = self\n while key != current.key:\n if key < current.key:\n current = current.left\n elif key > current.key:\n current = current.right\n if current is None:\n break\n return current\n <function token>\n <function token>\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) ->int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n\n @property\n def size(self) ->int:\n \"\"\"Return the number of nodes in the tree.\"\"\"\n return self.root.size if not self.empty() else 0\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n\n @property\n def height(self) ->int:\n \"\"\"Return the height of the tree.\"\"\"\n return self.root.height if not self.empty() else 0\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n\n def remove(self, key: int) ->bool:\n \"\"\"Remove element with such key if it exists in the tree (return True),\n or return False otherwise.\"\"\"\n current = self.root.find(key) if not self.empty() else None\n if current is None:\n return False\n self.root = current.remove()\n return True\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
"<import token>\n\n\nclass Node:\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) ->int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n\n @property\n def size(self) ->int:\n \"\"\"Return the number of nodes in the tree.\"\"\"\n return self.root.size if not self.empty() else 0\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n\n @property\n def height(self) ->int:\n \"\"\"Return the height of the tree.\"\"\"\n return self.root.height if not self.empty() else 0\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n\n def remove(self, key: int) ->bool:\n \"\"\"Remove element with such key if it exists in the tree (return True),\n or return False otherwise.\"\"\"\n current = self.root.find(key) if not self.empty() else None\n if current is None:\n return False\n self.root = current.remove()\n return True\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
"<import token>\n<class token>\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) ->int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n\n @property\n def size(self) ->int:\n \"\"\"Return the number of nodes in the tree.\"\"\"\n return self.root.size if not self.empty() else 0\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n\n @property\n def height(self) ->int:\n \"\"\"Return the height of the tree.\"\"\"\n return self.root.height if not self.empty() else 0\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n\n def remove(self, key: int) ->bool:\n \"\"\"Remove element with such key if it exists in the tree (return True),\n or return False otherwise.\"\"\"\n current = self.root.find(key) if not self.empty() else None\n if current is None:\n return False\n self.root = current.remove()\n return True\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
"<import token>\n<class token>\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) ->int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n <function token>\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n\n @property\n def height(self) ->int:\n \"\"\"Return the height of the tree.\"\"\"\n return self.root.height if not self.empty() else 0\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n\n def remove(self, key: int) ->bool:\n \"\"\"Remove element with such key if it exists in the tree (return True),\n or return False otherwise.\"\"\"\n current = self.root.find(key) if not self.empty() else None\n if current is None:\n return False\n self.root = current.remove()\n return True\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
"<import token>\n<class token>\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) ->int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n <function token>\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n <function token>\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n\n def remove(self, key: int) ->bool:\n \"\"\"Remove element with such key if it exists in the tree (return True),\n or return False otherwise.\"\"\"\n current = self.root.find(key) if not self.empty() else None\n if current is None:\n return False\n self.root = current.remove()\n return True\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
"<import token>\n<class token>\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n\n def __getitem__(self, item: int) ->int:\n \"\"\"Get key by index.\"\"\"\n return self.root[item].key\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n <function token>\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n <function token>\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n <function token>\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
"<import token>\n<class token>\n\n\nclass AVLTree:\n\n def __init__(self, root=None):\n self.root: Union[Node, None] = root\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n <function token>\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n <function token>\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n <function token>\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n <function token>\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
"<import token>\n<class token>\n\n\nclass AVLTree:\n <function token>\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n <function token>\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n <function token>\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n <function token>\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n\n def find(self, key: int) ->bool:\n \"\"\"Return True if element with such key exists in the tree, otherwise False.\"\"\"\n if self.empty():\n return False\n return self.root.find(key) is not None\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n <function token>\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
"<import token>\n<class token>\n\n\nclass AVLTree:\n <function token>\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n <function token>\n\n def empty(self) ->bool:\n \"\"\"Check if the tree is empty.\"\"\"\n return True if self.root is None else False\n <function token>\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n <function token>\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n <function token>\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n <function token>\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
"<import token>\n<class token>\n\n\nclass AVLTree:\n <function token>\n\n def __iter__(self):\n \"\"\"In-order traversal of the tree.\"\"\"\n if self.empty():\n return\n for node in self.root:\n yield node.key\n <function token>\n <function token>\n <function token>\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n <function token>\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n <function token>\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n <function token>\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
"<import token>\n<class token>\n\n\nclass AVLTree:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n <function token>\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n <function token>\n\n def insert(self, key: int) ->bool:\n \"\"\"Insert key (create new element) in the tree\n and return True on success or False on failure.\"\"\"\n if self.empty():\n self.root = Node(key)\n return True\n current = self.root\n while current.key != key:\n if key < current.key:\n if current.left is None:\n self.root = current.add_left(key=key)\n return True\n else:\n current = current.left\n elif key > current.key:\n if current.right is None:\n self.root = current.add_right(key=key)\n return True\n else:\n current = current.right\n return False\n <function token>\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
"<import token>\n<class token>\n\n\nclass AVLTree:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n <function token>\n\n def clear(self):\n \"\"\"Clear the tree.\"\"\"\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node is None:\n continue\n node.parent = None\n if node.left is not None:\n stack.append(node.left)\n node.left = None\n if node.right is not None:\n stack.append(node.right)\n node.right = None\n self.root = None\n <function token>\n <function token>\n <function token>\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
"<import token>\n<class token>\n\n\nclass AVLTree:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def segment_sum(self, left, right):\n \"\"\"Compute sum of all tree keys in segment [left, right].\"\"\"\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
"<import token>\n<class token>\n\n\nclass AVLTree:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def sum(self) ->int:\n \"\"\"Return sum of all keys of the tree.\"\"\"\n return self.root.sum\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
"<import token>\n<class token>\n\n\nclass AVLTree:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n<function token>\n<function token>\n",
"<import token>\n<class token>\n<class token>\n<function token>\n<code token>\n<function token>\n<function token>\n"
] | false |
98,659 |
33ae4dbcd496be438267014506b76bac457aaf55
|
import time
import torch
def run_train(config, model, train_loader, eval_loader, writer):
optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, len(train_loader), eta_min=config.lr)
training_loss_values = []
validation_loss_values = []
validation_accuracy_values = []
for epoch in range(config.epochs):
model.train()
print('======== Epoch {:} / {:} ========'.format(epoch + 1, config.epochs))
start_time = time.time()
total_loss = 0
for batch_no, batch in enumerate(train_loader):
source = batch[0].to(device)
target = batch[1].to(device)
model.zero_grad()
loss, logits = model(source, target)
total_loss += loss.item()
logits = logits.detach().cpu().numpy()
label_ids = target.to('cpu').numpy()
loss.backward()
optimizer.step()
scheduler.step()
#Logging the loss and accuracy (below) in Tensorboard
avg_train_loss = total_loss / len(train_loader)
training_loss_values.append(avg_train_loss)
for name, weights in model.named_parameters():
writer.add_histogram(name, weights, epoch)
writer.add_scalar('Train/Loss', avg_train_loss, epoch)
print("Average training loss: {0:.2f}".format(avg_train_loss))
print("Running Validation...")
model.eval()
eval_loss, eval_accuracy = 0, 0
nb_eval_steps = 0
for batch_no, batch in enumerate(eval_loader):
source = batch[0].to(device)
target = batch[1].to(device)
with torch.no_grad():
loss, logits = model(source, target)
logits = logits.detach().cpu().numpy()
label_ids = target.to('cpu').numpy()
tmp_eval_accuracy = flat_accuracy(logits, label_ids)
eval_accuracy += tmp_eval_accuracy
eval_loss += loss
nb_eval_steps += 1
avg_valid_acc = eval_accuracy/nb_eval_steps
avg_valid_loss = eval_loss/nb_eval_steps
validation_loss_values.append(avg_valid_loss)
validation_accuracy_values.append(avg_valid_acc)
writer.add_scalar('Valid/Loss', avg_valid_loss, epoch)
writer.add_scalar('Valid/Accuracy', avg_valid_acc, epoch)
writer.flush()
print("Avg Val Accuracy: {0:.2f}".format(avg_valid_acc))
print("Average Val Loss: {0:.2f}".format(avg_valid_loss))
print("Time taken by epoch: {0:.2f}".format(time.time() - start_time))
return training_loss_values, validation_loss_values, validation_accuracy_values
|
[
"import time\nimport torch\n\n\n\n\ndef run_train(config, model, train_loader, eval_loader, writer):\n \n optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, len(train_loader), eta_min=config.lr)\n \n training_loss_values = []\n validation_loss_values = []\n validation_accuracy_values = []\n\n for epoch in range(config.epochs):\n\n model.train()\n\n print('======== Epoch {:} / {:} ========'.format(epoch + 1, config.epochs))\n start_time = time.time()\n\n total_loss = 0\n\n for batch_no, batch in enumerate(train_loader):\n\n source = batch[0].to(device)\n target = batch[1].to(device)\n\n model.zero_grad() \n\n loss, logits = model(source, target)\n total_loss += loss.item()\n \n logits = logits.detach().cpu().numpy()\n label_ids = target.to('cpu').numpy()\n\n loss.backward()\n\n optimizer.step()\n scheduler.step()\n\n #Logging the loss and accuracy (below) in Tensorboard\n avg_train_loss = total_loss / len(train_loader) \n training_loss_values.append(avg_train_loss)\n\n for name, weights in model.named_parameters():\n writer.add_histogram(name, weights, epoch)\n\n writer.add_scalar('Train/Loss', avg_train_loss, epoch)\n\n print(\"Average training loss: {0:.2f}\".format(avg_train_loss))\n print(\"Running Validation...\")\n\n model.eval()\n\n eval_loss, eval_accuracy = 0, 0\n nb_eval_steps = 0\n\n for batch_no, batch in enumerate(eval_loader):\n \n source = batch[0].to(device)\n target = batch[1].to(device)\n \n with torch.no_grad(): \n loss, logits = model(source, target)\n\n logits = logits.detach().cpu().numpy()\n label_ids = target.to('cpu').numpy()\n \n tmp_eval_accuracy = flat_accuracy(logits, label_ids)\n eval_accuracy += tmp_eval_accuracy\n eval_loss += loss\n\n nb_eval_steps += 1\n\n avg_valid_acc = eval_accuracy/nb_eval_steps\n avg_valid_loss = eval_loss/nb_eval_steps\n validation_loss_values.append(avg_valid_loss)\n validation_accuracy_values.append(avg_valid_acc)\n\n writer.add_scalar('Valid/Loss', avg_valid_loss, epoch)\n writer.add_scalar('Valid/Accuracy', avg_valid_acc, epoch)\n writer.flush()\n\n print(\"Avg Val Accuracy: {0:.2f}\".format(avg_valid_acc))\n print(\"Average Val Loss: {0:.2f}\".format(avg_valid_loss))\n print(\"Time taken by epoch: {0:.2f}\".format(time.time() - start_time))\n\n return training_loss_values, validation_loss_values, validation_accuracy_values\n\n\n",
"import time\nimport torch\n\n\ndef run_train(config, model, train_loader, eval_loader, writer):\n optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, len(\n train_loader), eta_min=config.lr)\n training_loss_values = []\n validation_loss_values = []\n validation_accuracy_values = []\n for epoch in range(config.epochs):\n model.train()\n print('======== Epoch {:} / {:} ========'.format(epoch + 1, config.\n epochs))\n start_time = time.time()\n total_loss = 0\n for batch_no, batch in enumerate(train_loader):\n source = batch[0].to(device)\n target = batch[1].to(device)\n model.zero_grad()\n loss, logits = model(source, target)\n total_loss += loss.item()\n logits = logits.detach().cpu().numpy()\n label_ids = target.to('cpu').numpy()\n loss.backward()\n optimizer.step()\n scheduler.step()\n avg_train_loss = total_loss / len(train_loader)\n training_loss_values.append(avg_train_loss)\n for name, weights in model.named_parameters():\n writer.add_histogram(name, weights, epoch)\n writer.add_scalar('Train/Loss', avg_train_loss, epoch)\n print('Average training loss: {0:.2f}'.format(avg_train_loss))\n print('Running Validation...')\n model.eval()\n eval_loss, eval_accuracy = 0, 0\n nb_eval_steps = 0\n for batch_no, batch in enumerate(eval_loader):\n source = batch[0].to(device)\n target = batch[1].to(device)\n with torch.no_grad():\n loss, logits = model(source, target)\n logits = logits.detach().cpu().numpy()\n label_ids = target.to('cpu').numpy()\n tmp_eval_accuracy = flat_accuracy(logits, label_ids)\n eval_accuracy += tmp_eval_accuracy\n eval_loss += loss\n nb_eval_steps += 1\n avg_valid_acc = eval_accuracy / nb_eval_steps\n avg_valid_loss = eval_loss / nb_eval_steps\n validation_loss_values.append(avg_valid_loss)\n validation_accuracy_values.append(avg_valid_acc)\n writer.add_scalar('Valid/Loss', avg_valid_loss, epoch)\n writer.add_scalar('Valid/Accuracy', avg_valid_acc, epoch)\n writer.flush()\n print('Avg Val Accuracy: {0:.2f}'.format(avg_valid_acc))\n print('Average Val Loss: {0:.2f}'.format(avg_valid_loss))\n print('Time taken by epoch: {0:.2f}'.format(time.time() - start_time))\n return (training_loss_values, validation_loss_values,\n validation_accuracy_values)\n",
"<import token>\n\n\ndef run_train(config, model, train_loader, eval_loader, writer):\n optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, len(\n train_loader), eta_min=config.lr)\n training_loss_values = []\n validation_loss_values = []\n validation_accuracy_values = []\n for epoch in range(config.epochs):\n model.train()\n print('======== Epoch {:} / {:} ========'.format(epoch + 1, config.\n epochs))\n start_time = time.time()\n total_loss = 0\n for batch_no, batch in enumerate(train_loader):\n source = batch[0].to(device)\n target = batch[1].to(device)\n model.zero_grad()\n loss, logits = model(source, target)\n total_loss += loss.item()\n logits = logits.detach().cpu().numpy()\n label_ids = target.to('cpu').numpy()\n loss.backward()\n optimizer.step()\n scheduler.step()\n avg_train_loss = total_loss / len(train_loader)\n training_loss_values.append(avg_train_loss)\n for name, weights in model.named_parameters():\n writer.add_histogram(name, weights, epoch)\n writer.add_scalar('Train/Loss', avg_train_loss, epoch)\n print('Average training loss: {0:.2f}'.format(avg_train_loss))\n print('Running Validation...')\n model.eval()\n eval_loss, eval_accuracy = 0, 0\n nb_eval_steps = 0\n for batch_no, batch in enumerate(eval_loader):\n source = batch[0].to(device)\n target = batch[1].to(device)\n with torch.no_grad():\n loss, logits = model(source, target)\n logits = logits.detach().cpu().numpy()\n label_ids = target.to('cpu').numpy()\n tmp_eval_accuracy = flat_accuracy(logits, label_ids)\n eval_accuracy += tmp_eval_accuracy\n eval_loss += loss\n nb_eval_steps += 1\n avg_valid_acc = eval_accuracy / nb_eval_steps\n avg_valid_loss = eval_loss / nb_eval_steps\n validation_loss_values.append(avg_valid_loss)\n validation_accuracy_values.append(avg_valid_acc)\n writer.add_scalar('Valid/Loss', avg_valid_loss, epoch)\n writer.add_scalar('Valid/Accuracy', avg_valid_acc, epoch)\n writer.flush()\n print('Avg Val Accuracy: {0:.2f}'.format(avg_valid_acc))\n print('Average Val Loss: {0:.2f}'.format(avg_valid_loss))\n print('Time taken by epoch: {0:.2f}'.format(time.time() - start_time))\n return (training_loss_values, validation_loss_values,\n validation_accuracy_values)\n",
"<import token>\n<function token>\n"
] | false |
98,660 |
1c05ed5408c071a57b2c0250247d03095747ce96
|
#!/usr/bin/env python
import roslib
roslib.load_manifest('spheretrax_example_subscriber')
import rospy
from spheretrax_ros.msg import SphereTraxData
class SubscriberNode(object):
def __init__(self):
rospy.init_node('example_subscriber')
self.sub = rospy.Subscriber('/spheretrax/data',SphereTraxData,self.handle_data)
def run(self):
rospy.spin()
def handle_data(self,data):
print data
print
# -----------------------------------------------------------------------------
if __name__ == '__main__':
node = SubscriberNode()
node.run()
|
[
"#!/usr/bin/env python\nimport roslib\nroslib.load_manifest('spheretrax_example_subscriber')\nimport rospy\n\nfrom spheretrax_ros.msg import SphereTraxData\n\nclass SubscriberNode(object):\n\n def __init__(self):\n\n rospy.init_node('example_subscriber')\n self.sub = rospy.Subscriber('/spheretrax/data',SphereTraxData,self.handle_data)\n\n def run(self):\n rospy.spin()\n\n def handle_data(self,data):\n print data\n print\n\n# -----------------------------------------------------------------------------\nif __name__ == '__main__':\n\n node = SubscriberNode()\n node.run()\n"
] | true |
98,661 |
5b469f7e4a1dee801eddbd8f5a6abd189a02c18c
|
__author__ = 'johan'
import argparse
from torganizer.setup import get_config, setup, setup_logging
parser = argparse.ArgumentParser(description='Torrent Organizer script')
parser.add_argument('--config', dest='config_path', action='store', type=str, help='Path to torganizer config yaml')
parser.add_argument('--src', dest='src', action='store', type=str, help='Source folder to organize')
args = parser.parse_args()
config = get_config(args.config_path)
setup_logging(config)
handler = setup(config, args.src)
handler.execute()
|
[
"__author__ = 'johan'\n\nimport argparse\nfrom torganizer.setup import get_config, setup, setup_logging\n\n\nparser = argparse.ArgumentParser(description='Torrent Organizer script')\nparser.add_argument('--config', dest='config_path', action='store', type=str, help='Path to torganizer config yaml')\nparser.add_argument('--src', dest='src', action='store', type=str, help='Source folder to organize')\nargs = parser.parse_args()\n\nconfig = get_config(args.config_path)\nsetup_logging(config)\nhandler = setup(config, args.src)\nhandler.execute()",
"__author__ = 'johan'\nimport argparse\nfrom torganizer.setup import get_config, setup, setup_logging\nparser = argparse.ArgumentParser(description='Torrent Organizer script')\nparser.add_argument('--config', dest='config_path', action='store', type=\n str, help='Path to torganizer config yaml')\nparser.add_argument('--src', dest='src', action='store', type=str, help=\n 'Source folder to organize')\nargs = parser.parse_args()\nconfig = get_config(args.config_path)\nsetup_logging(config)\nhandler = setup(config, args.src)\nhandler.execute()\n",
"__author__ = 'johan'\n<import token>\nparser = argparse.ArgumentParser(description='Torrent Organizer script')\nparser.add_argument('--config', dest='config_path', action='store', type=\n str, help='Path to torganizer config yaml')\nparser.add_argument('--src', dest='src', action='store', type=str, help=\n 'Source folder to organize')\nargs = parser.parse_args()\nconfig = get_config(args.config_path)\nsetup_logging(config)\nhandler = setup(config, args.src)\nhandler.execute()\n",
"<assignment token>\n<import token>\n<assignment token>\nparser.add_argument('--config', dest='config_path', action='store', type=\n str, help='Path to torganizer config yaml')\nparser.add_argument('--src', dest='src', action='store', type=str, help=\n 'Source folder to organize')\n<assignment token>\nsetup_logging(config)\n<assignment token>\nhandler.execute()\n",
"<assignment token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,662 |
4f08e9a3ec9ecc8c35791fabc3c62224e653da85
|
marks=[66,34,76,55,98,57,63]
weights=[45,63,76,89]
new_list=marks+weights
print("The list after append is:",new_list)
|
[
"marks=[66,34,76,55,98,57,63]\nweights=[45,63,76,89]\n\nnew_list=marks+weights\nprint(\"The list after append is:\",new_list)",
"marks = [66, 34, 76, 55, 98, 57, 63]\nweights = [45, 63, 76, 89]\nnew_list = marks + weights\nprint('The list after append is:', new_list)\n",
"<assignment token>\nprint('The list after append is:', new_list)\n",
"<assignment token>\n<code token>\n"
] | false |
98,663 |
99c5fade23d60b5af442fde30b3932e5c729d569
|
from sets import Set
class Solution(object):
# corner case: "hot", "dog", ["hot","dog","cog","pot","dot"] -> [['hot', 'dot', 'dog']]
def findLadders(self, beginWord, endWord, wordlist):
"""
:type beginWord: str
:type endWord: str
:type wordlist: Set[str]
:rtype: List[List[int]]
"""
from collections import deque
wordlist.add(endWord)
wordlist.remove(beginWord)
prev_dict = {beginWord:[]}
current = [beginWord]
res = []
while len(wordlist) > 0 or endWord in current:
#print wordlist
if endWord in current:
#print prev_dict
self.getResult(prev_dict,endWord,deque(),res)
return res
new_reachable = {}
for word in current:
for i in xrange(len(word)):
for ch in 'abcdefghijklmnopqrstuvwxyz':
if ch != word[i] and word[:i]+ch+word[i+1:] in wordlist:
new_word = word[:i]+ch+word[i+1:]
if new_word in new_reachable:
new_reachable[new_word].append(word)
else:
new_reachable[new_word] = [word]
#print new_reachable
for new_word in new_reachable:
wordlist.remove(new_word)
prev_dict.update(new_reachable)
current = new_reachable.keys()
#print current,prev_dict
if not current:
break
return res
def getResult(self,prev_dict,current_word,current_seq,res):
current_seq.appendleft(current_word)
if not prev_dict[current_word]:
res.append(list(current_seq))
current_seq.popleft()
return
for word in prev_dict[current_word]:
self.getResult(prev_dict,word,current_seq,res)
current_seq.popleft()
sol = Solution()
#{'hot': ['pot', 'dot'], 'pot': ['hot'], 'dot': ['hot'], 'dog': ['dot']}
print sol.findLadders("a","c", Set(["a","b",'c']))
|
[
"from sets import Set\nclass Solution(object):\n # corner case: \"hot\", \"dog\", [\"hot\",\"dog\",\"cog\",\"pot\",\"dot\"] -> [['hot', 'dot', 'dog']]\n def findLadders(self, beginWord, endWord, wordlist):\n \"\"\"\n :type beginWord: str\n :type endWord: str\n :type wordlist: Set[str]\n :rtype: List[List[int]]\n \"\"\"\n from collections import deque\n wordlist.add(endWord)\n wordlist.remove(beginWord)\n prev_dict = {beginWord:[]}\n current = [beginWord]\n res = []\n while len(wordlist) > 0 or endWord in current:\n #print wordlist\n if endWord in current:\n #print prev_dict\n self.getResult(prev_dict,endWord,deque(),res)\n return res\n new_reachable = {}\n for word in current:\n for i in xrange(len(word)):\n for ch in 'abcdefghijklmnopqrstuvwxyz':\n if ch != word[i] and word[:i]+ch+word[i+1:] in wordlist:\n new_word = word[:i]+ch+word[i+1:]\n if new_word in new_reachable:\n new_reachable[new_word].append(word)\n else:\n new_reachable[new_word] = [word]\n #print new_reachable\n for new_word in new_reachable:\n wordlist.remove(new_word)\n prev_dict.update(new_reachable)\n current = new_reachable.keys()\n #print current,prev_dict\n if not current:\n break\n return res\n \n def getResult(self,prev_dict,current_word,current_seq,res):\n current_seq.appendleft(current_word)\n if not prev_dict[current_word]:\n res.append(list(current_seq))\n current_seq.popleft()\n return\n for word in prev_dict[current_word]:\n self.getResult(prev_dict,word,current_seq,res)\n current_seq.popleft()\n \nsol = Solution()\n#{'hot': ['pot', 'dot'], 'pot': ['hot'], 'dot': ['hot'], 'dog': ['dot']}\nprint sol.findLadders(\"a\",\"c\", Set([\"a\",\"b\",'c']))\n\n"
] | true |
98,664 |
6f3227faa7fc9b7e9123acf66fa57d346d576442
|
from tensorflow.keras import Model, Input, regularizers
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, UpSampling2D, Add, Dropout
from tensorflow.keras.backend import clear_session
from tensorflow.keras import Sequential
def Autoencoder(img_shape = (256, 256, 3)):
clear_session()
Input_img = Input(shape=img_shape)
#encoding architecture
x1 = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l1(10e-10))(Input_img)
x2 = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l1(10e-10))(x1)
x3 = MaxPool2D(padding='same')(x2)
x4 = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l1(10e-10))(x3)
x5 = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l1(10e-10))(x4)
x6 = MaxPool2D(padding='same')(x5)
encoded = Conv2D(256, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l1(10e-10))(x6)
#encoded = Conv2D(64, (3, 3), activation='relu', padding='same')(x2)
# decoding architecture
x7 = UpSampling2D()(encoded)
x8 = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l1(10e-10))(x7)
x9 = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l1(10e-10))(x8)
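    # skip connection: add the matching encoder feature map (x5) back into the decoder path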
x10 = Add()([x5, x9])
x11 = UpSampling2D()(x10)
x12 = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l1(10e-10))(x11)
x13 = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l1(10e-10))(x12)
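    # second skip connection from the first encoder block (x2)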
x14 = Add()([x2, x13])
# x3 = UpSampling2D((2, 2))(x3)
# x2 = Conv2D(128, (3, 3), activation='relu', padding='same')(x3)
# x1 = Conv2D(256, (3, 3), activation='relu', padding='same')(x2)
decoded = Conv2D(3, (3, 3), padding='same',activation='relu', kernel_regularizer=regularizers.l1(10e-10))(x14)
autoencoder = Model(Input_img, decoded)
return autoencoder
def SRCNN(img_shape = (256, 256, 3)):
model = Sequential()
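    # three-stage SRCNN-style stack: 9x9 feature extraction, 5x5 non-linear mapping, 5x5 reconstruction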
model.add(Conv2D(32, 9, activation="relu", input_shape=img_shape, padding="same"))
model.add(Conv2D(16, 5, activation="relu", padding="same"))
model.add(Conv2D(3, 5, activation="relu", padding="same"))
return model
|
[
"from tensorflow.keras import Model, Input, regularizers\nfrom tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, UpSampling2D, Add, Dropout\nfrom tensorflow.keras.backend import clear_session\nfrom tensorflow.keras import Sequential\n\ndef Autoencoder(img_shape = (256, 256, 3)):\n clear_session()\n\n Input_img = Input(shape=img_shape) \n\n #encoding architecture\n x1 = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l1(10e-10))(Input_img)\n x2 = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l1(10e-10))(x1)\n x3 = MaxPool2D(padding='same')(x2)\n x4 = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l1(10e-10))(x3)\n x5 = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l1(10e-10))(x4)\n x6 = MaxPool2D(padding='same')(x5)\n\n encoded = Conv2D(256, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l1(10e-10))(x6)\n #encoded = Conv2D(64, (3, 3), activation='relu', padding='same')(x2)\n # decoding architecture\n x7 = UpSampling2D()(encoded)\n x8 = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l1(10e-10))(x7)\n x9 = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l1(10e-10))(x8)\n x10 = Add()([x5, x9])\n x11 = UpSampling2D()(x10)\n x12 = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l1(10e-10))(x11)\n x13 = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l1(10e-10))(x12)\n x14 = Add()([x2, x13])\n # x3 = UpSampling2D((2, 2))(x3)\n # x2 = Conv2D(128, (3, 3), activation='relu', padding='same')(x3)\n # x1 = Conv2D(256, (3, 3), activation='relu', padding='same')(x2)\n decoded = Conv2D(3, (3, 3), padding='same',activation='relu', kernel_regularizer=regularizers.l1(10e-10))(x14)\n autoencoder = Model(Input_img, decoded)\n return autoencoder\n\ndef SRCNN(img_shape = (256, 256, 3)):\n model = Sequential()\n model.add(Conv2D(32, 9, activation=\"relu\", input_shape=img_shape, padding=\"same\"))\n model.add(Conv2D(16, 5, activation=\"relu\", padding=\"same\"))\n model.add(Conv2D(3, 5, activation=\"relu\", padding=\"same\"))\n return model",
"from tensorflow.keras import Model, Input, regularizers\nfrom tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, UpSampling2D, Add, Dropout\nfrom tensorflow.keras.backend import clear_session\nfrom tensorflow.keras import Sequential\n\n\ndef Autoencoder(img_shape=(256, 256, 3)):\n clear_session()\n Input_img = Input(shape=img_shape)\n x1 = Conv2D(64, (3, 3), activation='relu', padding='same',\n kernel_regularizer=regularizers.l1(1e-09))(Input_img)\n x2 = Conv2D(64, (3, 3), activation='relu', padding='same',\n kernel_regularizer=regularizers.l1(1e-09))(x1)\n x3 = MaxPool2D(padding='same')(x2)\n x4 = Conv2D(128, (3, 3), activation='relu', padding='same',\n kernel_regularizer=regularizers.l1(1e-09))(x3)\n x5 = Conv2D(128, (3, 3), activation='relu', padding='same',\n kernel_regularizer=regularizers.l1(1e-09))(x4)\n x6 = MaxPool2D(padding='same')(x5)\n encoded = Conv2D(256, (3, 3), activation='relu', padding='same',\n kernel_regularizer=regularizers.l1(1e-09))(x6)\n x7 = UpSampling2D()(encoded)\n x8 = Conv2D(128, (3, 3), activation='relu', padding='same',\n kernel_regularizer=regularizers.l1(1e-09))(x7)\n x9 = Conv2D(128, (3, 3), activation='relu', padding='same',\n kernel_regularizer=regularizers.l1(1e-09))(x8)\n x10 = Add()([x5, x9])\n x11 = UpSampling2D()(x10)\n x12 = Conv2D(64, (3, 3), activation='relu', padding='same',\n kernel_regularizer=regularizers.l1(1e-09))(x11)\n x13 = Conv2D(64, (3, 3), activation='relu', padding='same',\n kernel_regularizer=regularizers.l1(1e-09))(x12)\n x14 = Add()([x2, x13])\n decoded = Conv2D(3, (3, 3), padding='same', activation='relu',\n kernel_regularizer=regularizers.l1(1e-09))(x14)\n autoencoder = Model(Input_img, decoded)\n return autoencoder\n\n\ndef SRCNN(img_shape=(256, 256, 3)):\n model = Sequential()\n model.add(Conv2D(32, 9, activation='relu', input_shape=img_shape,\n padding='same'))\n model.add(Conv2D(16, 5, activation='relu', padding='same'))\n model.add(Conv2D(3, 5, activation='relu', padding='same'))\n return model\n",
"<import token>\n\n\ndef Autoencoder(img_shape=(256, 256, 3)):\n clear_session()\n Input_img = Input(shape=img_shape)\n x1 = Conv2D(64, (3, 3), activation='relu', padding='same',\n kernel_regularizer=regularizers.l1(1e-09))(Input_img)\n x2 = Conv2D(64, (3, 3), activation='relu', padding='same',\n kernel_regularizer=regularizers.l1(1e-09))(x1)\n x3 = MaxPool2D(padding='same')(x2)\n x4 = Conv2D(128, (3, 3), activation='relu', padding='same',\n kernel_regularizer=regularizers.l1(1e-09))(x3)\n x5 = Conv2D(128, (3, 3), activation='relu', padding='same',\n kernel_regularizer=regularizers.l1(1e-09))(x4)\n x6 = MaxPool2D(padding='same')(x5)\n encoded = Conv2D(256, (3, 3), activation='relu', padding='same',\n kernel_regularizer=regularizers.l1(1e-09))(x6)\n x7 = UpSampling2D()(encoded)\n x8 = Conv2D(128, (3, 3), activation='relu', padding='same',\n kernel_regularizer=regularizers.l1(1e-09))(x7)\n x9 = Conv2D(128, (3, 3), activation='relu', padding='same',\n kernel_regularizer=regularizers.l1(1e-09))(x8)\n x10 = Add()([x5, x9])\n x11 = UpSampling2D()(x10)\n x12 = Conv2D(64, (3, 3), activation='relu', padding='same',\n kernel_regularizer=regularizers.l1(1e-09))(x11)\n x13 = Conv2D(64, (3, 3), activation='relu', padding='same',\n kernel_regularizer=regularizers.l1(1e-09))(x12)\n x14 = Add()([x2, x13])\n decoded = Conv2D(3, (3, 3), padding='same', activation='relu',\n kernel_regularizer=regularizers.l1(1e-09))(x14)\n autoencoder = Model(Input_img, decoded)\n return autoencoder\n\n\ndef SRCNN(img_shape=(256, 256, 3)):\n model = Sequential()\n model.add(Conv2D(32, 9, activation='relu', input_shape=img_shape,\n padding='same'))\n model.add(Conv2D(16, 5, activation='relu', padding='same'))\n model.add(Conv2D(3, 5, activation='relu', padding='same'))\n return model\n",
"<import token>\n\n\ndef Autoencoder(img_shape=(256, 256, 3)):\n clear_session()\n Input_img = Input(shape=img_shape)\n x1 = Conv2D(64, (3, 3), activation='relu', padding='same',\n kernel_regularizer=regularizers.l1(1e-09))(Input_img)\n x2 = Conv2D(64, (3, 3), activation='relu', padding='same',\n kernel_regularizer=regularizers.l1(1e-09))(x1)\n x3 = MaxPool2D(padding='same')(x2)\n x4 = Conv2D(128, (3, 3), activation='relu', padding='same',\n kernel_regularizer=regularizers.l1(1e-09))(x3)\n x5 = Conv2D(128, (3, 3), activation='relu', padding='same',\n kernel_regularizer=regularizers.l1(1e-09))(x4)\n x6 = MaxPool2D(padding='same')(x5)\n encoded = Conv2D(256, (3, 3), activation='relu', padding='same',\n kernel_regularizer=regularizers.l1(1e-09))(x6)\n x7 = UpSampling2D()(encoded)\n x8 = Conv2D(128, (3, 3), activation='relu', padding='same',\n kernel_regularizer=regularizers.l1(1e-09))(x7)\n x9 = Conv2D(128, (3, 3), activation='relu', padding='same',\n kernel_regularizer=regularizers.l1(1e-09))(x8)\n x10 = Add()([x5, x9])\n x11 = UpSampling2D()(x10)\n x12 = Conv2D(64, (3, 3), activation='relu', padding='same',\n kernel_regularizer=regularizers.l1(1e-09))(x11)\n x13 = Conv2D(64, (3, 3), activation='relu', padding='same',\n kernel_regularizer=regularizers.l1(1e-09))(x12)\n x14 = Add()([x2, x13])\n decoded = Conv2D(3, (3, 3), padding='same', activation='relu',\n kernel_regularizer=regularizers.l1(1e-09))(x14)\n autoencoder = Model(Input_img, decoded)\n return autoencoder\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n"
] | false |
98,665 |
63f52037847d18fd4208be751561adaf4e06d22e
|
class Solution:
def twoSum(self, numbers, target):
res = []
first_index = 0
last_index = len(numbers)-1
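        # narrow the right pointer to the first value greater than target; this shortcut assumes the numbers are non-negative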
for i in range(len(numbers)):
if numbers[i] > target:
last_index = i
break
while first_index < last_index:
if numbers[first_index]+numbers[last_index] == target:
res.append(first_index+1)
res.append(last_index+1)
break
elif numbers[first_index] + numbers[last_index] < target:
first_index += 1
else:
last_index -= 1
return res
if __name__ == "__main__":
nums = [2, 7, 11, 15]
target = 9
print(Solution().twoSum(nums, target))
|
[
"class Solution:\n def twoSum(self, numbers, target):\n res = []\n first_index = 0\n last_index = len(numbers)-1\n for i in range(len(numbers)):\n if numbers[i] > target:\n last_index = i\n break\n while first_index < last_index:\n if numbers[first_index]+numbers[last_index] == target:\n res.append(first_index+1)\n res.append(last_index+1)\n break\n elif numbers[first_index] + numbers[last_index] < target:\n first_index += 1\n else:\n last_index -= 1\n return res\n\n\nif __name__ == \"__main__\":\n nums = [2, 7, 11, 15]\n target = 9\n print(Solution().twoSum(nums, target))\n",
"class Solution:\n\n def twoSum(self, numbers, target):\n res = []\n first_index = 0\n last_index = len(numbers) - 1\n for i in range(len(numbers)):\n if numbers[i] > target:\n last_index = i\n break\n while first_index < last_index:\n if numbers[first_index] + numbers[last_index] == target:\n res.append(first_index + 1)\n res.append(last_index + 1)\n break\n elif numbers[first_index] + numbers[last_index] < target:\n first_index += 1\n else:\n last_index -= 1\n return res\n\n\nif __name__ == '__main__':\n nums = [2, 7, 11, 15]\n target = 9\n print(Solution().twoSum(nums, target))\n",
"class Solution:\n\n def twoSum(self, numbers, target):\n res = []\n first_index = 0\n last_index = len(numbers) - 1\n for i in range(len(numbers)):\n if numbers[i] > target:\n last_index = i\n break\n while first_index < last_index:\n if numbers[first_index] + numbers[last_index] == target:\n res.append(first_index + 1)\n res.append(last_index + 1)\n break\n elif numbers[first_index] + numbers[last_index] < target:\n first_index += 1\n else:\n last_index -= 1\n return res\n\n\n<code token>\n",
"class Solution:\n <function token>\n\n\n<code token>\n",
"<class token>\n<code token>\n"
] | false |
98,666 |
207aa206d17ba094978067953c4a4ba59311545e
|
import unittest
from utils.channel_access import ChannelAccess
from utils.ioc_launcher import get_default_ioc_dir
from utils.test_modes import TestModes
from utils.testing import get_running_lewis_and_ioc, skip_if_recsim
DEVICE_PREFIX = "LINKAM95_01"
EMULATOR_NAME = "linkam_t95"
IOCS = [
{
"name": DEVICE_PREFIX,
"directory": get_default_ioc_dir("LINKAM95"),
"macros": {},
"emulator": EMULATOR_NAME,
"lewis_package": None
},
]
TEST_MODES = [TestModes.DEVSIM]
class Linkam95Tests(unittest.TestCase):
"""
Tests for the Linkam95 IOC.
"""
def setUp(self):
self._lewis, self._ioc = get_running_lewis_and_ioc(EMULATOR_NAME, DEVICE_PREFIX)
self.ca = ChannelAccess(device_prefix=DEVICE_PREFIX)
def test_GIVEN_a_valid_temperature_to_set_WHEN_set_THEN_display_temperature_is_valid_temperature(self):
expected_temp = 10
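        # push the temperature through the emulator backdoor and check the TEMP PV follows it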
self._lewis.backdoor_set_on_device("temperature", expected_temp)
self.ca.assert_that_pv_is("TEMP", expected_temp)
|
[
"import unittest\n\nfrom utils.channel_access import ChannelAccess\nfrom utils.ioc_launcher import get_default_ioc_dir\nfrom utils.test_modes import TestModes\nfrom utils.testing import get_running_lewis_and_ioc, skip_if_recsim\n\n\nDEVICE_PREFIX = \"LINKAM95_01\"\n\nEMULATOR_NAME = \"linkam_t95\"\nIOCS = [\n {\n \"name\": DEVICE_PREFIX,\n \"directory\": get_default_ioc_dir(\"LINKAM95\"),\n \"macros\": {},\n \"emulator\": EMULATOR_NAME,\n \"lewis_package\": None\n },\n]\n\nTEST_MODES = [TestModes.DEVSIM]\n\n\nclass Linkam95Tests(unittest.TestCase):\n \"\"\"\n Tests for the Linkam95 IOC.\n \"\"\"\n def setUp(self):\n self._lewis, self._ioc = get_running_lewis_and_ioc(EMULATOR_NAME, DEVICE_PREFIX)\n self.ca = ChannelAccess(device_prefix=DEVICE_PREFIX)\n\n def test_GIVEN_a_valid_temperature_to_set_WHEN_set_THEN_display_temperature_is_valid_temperature(self):\n\n expected_temp = 10\n\n self._lewis.backdoor_set_on_device(\"temperature\", expected_temp)\n\n self.ca.assert_that_pv_is(\"TEMP\", expected_temp)\n",
"import unittest\nfrom utils.channel_access import ChannelAccess\nfrom utils.ioc_launcher import get_default_ioc_dir\nfrom utils.test_modes import TestModes\nfrom utils.testing import get_running_lewis_and_ioc, skip_if_recsim\nDEVICE_PREFIX = 'LINKAM95_01'\nEMULATOR_NAME = 'linkam_t95'\nIOCS = [{'name': DEVICE_PREFIX, 'directory': get_default_ioc_dir('LINKAM95'\n ), 'macros': {}, 'emulator': EMULATOR_NAME, 'lewis_package': None}]\nTEST_MODES = [TestModes.DEVSIM]\n\n\nclass Linkam95Tests(unittest.TestCase):\n \"\"\"\n Tests for the Linkam95 IOC.\n \"\"\"\n\n def setUp(self):\n self._lewis, self._ioc = get_running_lewis_and_ioc(EMULATOR_NAME,\n DEVICE_PREFIX)\n self.ca = ChannelAccess(device_prefix=DEVICE_PREFIX)\n\n def test_GIVEN_a_valid_temperature_to_set_WHEN_set_THEN_display_temperature_is_valid_temperature(\n self):\n expected_temp = 10\n self._lewis.backdoor_set_on_device('temperature', expected_temp)\n self.ca.assert_that_pv_is('TEMP', expected_temp)\n",
"<import token>\nDEVICE_PREFIX = 'LINKAM95_01'\nEMULATOR_NAME = 'linkam_t95'\nIOCS = [{'name': DEVICE_PREFIX, 'directory': get_default_ioc_dir('LINKAM95'\n ), 'macros': {}, 'emulator': EMULATOR_NAME, 'lewis_package': None}]\nTEST_MODES = [TestModes.DEVSIM]\n\n\nclass Linkam95Tests(unittest.TestCase):\n \"\"\"\n Tests for the Linkam95 IOC.\n \"\"\"\n\n def setUp(self):\n self._lewis, self._ioc = get_running_lewis_and_ioc(EMULATOR_NAME,\n DEVICE_PREFIX)\n self.ca = ChannelAccess(device_prefix=DEVICE_PREFIX)\n\n def test_GIVEN_a_valid_temperature_to_set_WHEN_set_THEN_display_temperature_is_valid_temperature(\n self):\n expected_temp = 10\n self._lewis.backdoor_set_on_device('temperature', expected_temp)\n self.ca.assert_that_pv_is('TEMP', expected_temp)\n",
"<import token>\n<assignment token>\n\n\nclass Linkam95Tests(unittest.TestCase):\n \"\"\"\n Tests for the Linkam95 IOC.\n \"\"\"\n\n def setUp(self):\n self._lewis, self._ioc = get_running_lewis_and_ioc(EMULATOR_NAME,\n DEVICE_PREFIX)\n self.ca = ChannelAccess(device_prefix=DEVICE_PREFIX)\n\n def test_GIVEN_a_valid_temperature_to_set_WHEN_set_THEN_display_temperature_is_valid_temperature(\n self):\n expected_temp = 10\n self._lewis.backdoor_set_on_device('temperature', expected_temp)\n self.ca.assert_that_pv_is('TEMP', expected_temp)\n",
"<import token>\n<assignment token>\n\n\nclass Linkam95Tests(unittest.TestCase):\n <docstring token>\n\n def setUp(self):\n self._lewis, self._ioc = get_running_lewis_and_ioc(EMULATOR_NAME,\n DEVICE_PREFIX)\n self.ca = ChannelAccess(device_prefix=DEVICE_PREFIX)\n\n def test_GIVEN_a_valid_temperature_to_set_WHEN_set_THEN_display_temperature_is_valid_temperature(\n self):\n expected_temp = 10\n self._lewis.backdoor_set_on_device('temperature', expected_temp)\n self.ca.assert_that_pv_is('TEMP', expected_temp)\n",
"<import token>\n<assignment token>\n\n\nclass Linkam95Tests(unittest.TestCase):\n <docstring token>\n <function token>\n\n def test_GIVEN_a_valid_temperature_to_set_WHEN_set_THEN_display_temperature_is_valid_temperature(\n self):\n expected_temp = 10\n self._lewis.backdoor_set_on_device('temperature', expected_temp)\n self.ca.assert_that_pv_is('TEMP', expected_temp)\n",
"<import token>\n<assignment token>\n\n\nclass Linkam95Tests(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n<class token>\n"
] | false |
98,667 |
a84fbed45178bd97031cd702412181ff155b5560
|
import sys
def ts2datetime(timeStamp):
import time
try:
timeStamp = int(timeStamp)
timeArray = time.localtime(timeStamp)
timet = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
return timet
except Exception,e:
return ""
print "["
for line in sys.stdin:
if not line:
break
line = line.strip().split("\t")
if len(line) != 4: break
uid, ts, lat, lng, = line
datet = ts2datetime(ts)
print "{\"id\":\"%s\", \"time\":\"%s\", \"lat\":%s, \"lng\":%s}," % (uid, datet, lat, lng)
print "{}]"
|
[
"import sys\n\ndef ts2datetime(timeStamp):\n import time \n try:\n timeStamp = int(timeStamp)\n timeArray = time.localtime(timeStamp)\n timet = time.strftime(\"%Y-%m-%d %H:%M:%S\", timeArray)\n return timet\n except Exception,e:\n return \"\"\n\n\nprint \"[\"\nfor line in sys.stdin:\n if not line:\n break\n line = line.strip().split(\"\\t\")\n if len(line) != 4: break\n uid, ts, lat, lng, = line\n datet = ts2datetime(ts)\n\n print \"{\\\"id\\\":\\\"%s\\\", \\\"time\\\":\\\"%s\\\", \\\"lat\\\":%s, \\\"lng\\\":%s},\" % (uid, datet, lat, lng)\nprint \"{}]\"\n\n"
] | true |
98,668 |
4285374432cf1d8ad61812a892895c2160aaaa50
|
import os
from copy import deepcopy
from py_osm_cluster.util.coords import Coords as Coords
from py_osm_cluster.cluster import scikit
from py_osm_cluster.cluster import partitioning
from py_osm_cluster.eval import comparative as comparative
from py_osm_cluster.eval import standalone as standalone
from py_osm_cluster.cluster import hierarchical
import py_osm_cluster.util.statistic as statistic
import py_osm_cluster.visualisation.visualisation as visu
import py_osm_cluster.visualisation.animation as anim
import matplotlib.pyplot as plt
class GeneralTest:
def __init__(self):
self.current_object = None
self.current_rand_harvest = []
""" compresses the list of dictionaries into dictionary of lists"""
def compress_dicts(self,dictionary_list):
out = {x:[] for x in dictionary_list[0]}
for i in dictionary_list:
for k in out:
out[k].append(i[k])
return out
def harvest_rand_index(self,data_obj):
self.current_rand_harvest.append(comparative.scikit_rand_index(data_obj,self.current_object))
#print("harvesting")
def test_data_object(self,file,n_of_tests,function,function_kwargs):
function_kwargs["on_step"]=self.harvest_rand_index
#function_kwargs["iterations"]=15
data_obj = Coords()
data_obj.read_file(file)
self.current_object = data_obj
r_initial_standalone_dict = standalone.standard_scores_dict(data_obj)
#r_initial_general_data_dict = standalone.general_evaluate_clustered_object(data_obj)
r_standalone =[]
r_comparative =[]
r_rand_indexes = []
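		# run the clustering n_of_tests times, collecting standalone, comparative and per-step Rand-index scores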
for i in range(n_of_tests):
self.current_rand_harvest=[]
temp_data_obj = deepcopy(data_obj)
temp_data_obj= function(data_obj,**function_kwargs)
#print(self.current_rand_harvest)
r_rand_indexes.append(self.current_rand_harvest)
#plt.plot(self.current_rand_harvest)
#plt.show()
r_standalone.append(standalone.standard_scores_dict(temp_data_obj))
r_comparative.append(comparative.scikit_all_scores_dict(temp_data_obj,self.current_object))
#print(r_standalone)
#print(r_comparative)
out = {"rand_indexes":r_rand_indexes,"initial_standalone":r_initial_standalone_dict}
#print(r_rand_indexes)
out.update(self.compress_dicts(r_standalone))
out.update(self.compress_dicts(r_comparative))
return out
#"initial_general":r_initial_general_data_dict
def test_data_set(self,folder_name,n_of_tests,function,function_kwargs):
files = [folder_name+"/"+i for i in list(os.listdir(folder_name))]
out =[]
for i in files:
print("investigating file:"+i)
out.append(self.test_data_object(i,n_of_tests,function,function_kwargs))
return out
def compile_data(self,test_data_out):
data = self.compress_dicts(test_data_out)
data["initial_standalone"] = self.compress_dicts(data["initial_standalone"])
data["rand_indexes"] = [i for sublist in data["rand_indexes"] for i in sublist]
return data
def calculate_avg_stdev(self,compiled_data):
out={}
#print(compiled_data["rand_indexes"])
out["rand_indexes"] = list(zip(*compiled_data["rand_indexes"]))
out["rand_indexes"] = [statistic.stdev_avg(x) for x in out["rand_indexes"]]
print(out["rand_indexes"])
#out["initial_standalone"] = [statistic.stdev_avg(x) for x in out["rand_indexes"]]
for i in compiled_data:
if i is not "rand_indexes" and i is not "initial_standalone":
out[i] = [item for sublist in compiled_data[i] for item in sublist]
#print(out[i])
out[i] = statistic.stdev_avg(out[i])
out["initial_standalone"] = {k:statistic.stdev_avg(compiled_data["initial_standalone"][k]) for k in compiled_data["initial_standalone"]}
		return out
def execute(self,folder_name,n_of_tests,function,params):
data = self.compile_data(self.test_data_set(folder_name,n_of_tests,function,params))
return (data,self.calculate_avg_stdev(data))
import py_osm_cluster
def test_multiple_datasets(main_folder,n_of_tests,function,params):
short_folders = list(os.listdir(main_folder))
#folders = [main_folder+"/"+i for i in list(os.listdir(main_folder))]
gt = GeneralTest()
for i in short_folders:
unexpected= True
while unexpected:
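			# keep retrying this dataset until a run finishes without a ZeroDivisionError (e.g. from a degenerate clustering)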
try:
full_folder =main_folder+"/"+i
data = gt.execute(full_folder,n_of_tests,function,params)
#visu.lineplot(data[1]["rand_indexes"],i)
plt.plot([ i[1] for i in data[1]["rand_indexes"]],label=i)
f = open("output_for:"+i,"w")
f.write(str(data[1]))
unexpected = False
except ZeroDivisionError:
unexpected = True
plt.xlabel("iteration")
plt.ylabel("Rand index")
plt.legend(loc='lower right')
plt.show()
|
[
"import os\nfrom copy import deepcopy\nfrom py_osm_cluster.util.coords import Coords as Coords\nfrom py_osm_cluster.cluster import scikit\nfrom py_osm_cluster.cluster import partitioning\n\nfrom py_osm_cluster.eval import comparative as comparative\nfrom py_osm_cluster.eval import standalone as standalone\nfrom py_osm_cluster.cluster import hierarchical\n\nimport py_osm_cluster.util.statistic as statistic\n\nimport py_osm_cluster.visualisation.visualisation as visu\nimport py_osm_cluster.visualisation.animation as anim\nimport matplotlib.pyplot as plt\n\n\n\nclass GeneralTest:\n\tdef __init__(self):\n\t\tself.current_object = None\n\t\tself.current_rand_harvest = []\n\n\t\"\"\" compresses the list of dictionaries into dictionary of lists\"\"\"\n\tdef compress_dicts(self,dictionary_list):\n\t\tout = {x:[] for x in dictionary_list[0]}\n\t\tfor i in dictionary_list:\n\t\t\tfor k in out:\n\t\t\t\tout[k].append(i[k])\n\t\treturn out\n\tdef harvest_rand_index(self,data_obj):\n\t\tself.current_rand_harvest.append(comparative.scikit_rand_index(data_obj,self.current_object))\n\t\t#print(\"harvesting\")\n\n\tdef test_data_object(self,file,n_of_tests,function,function_kwargs):\n\t\tfunction_kwargs[\"on_step\"]=self.harvest_rand_index\n\t\t#function_kwargs[\"iterations\"]=15\n\t\tdata_obj = Coords()\n\t\tdata_obj.read_file(file)\n\t\tself.current_object = data_obj\n\n\t\tr_initial_standalone_dict = standalone.standard_scores_dict(data_obj)\n\t\t#r_initial_general_data_dict = standalone.general_evaluate_clustered_object(data_obj)\n\t\tr_standalone =[]\n\t\tr_comparative =[]\n\t\tr_rand_indexes = []\n\t\tfor i in range(n_of_tests):\n\t\t\tself.current_rand_harvest=[]\n\t\t\ttemp_data_obj = deepcopy(data_obj)\n\t\t\ttemp_data_obj= function(data_obj,**function_kwargs)\n\t\t\t#print(self.current_rand_harvest)\n\t\t\tr_rand_indexes.append(self.current_rand_harvest)\n\t\t\t#plt.plot(self.current_rand_harvest)\n\t\t\t#plt.show()\n\t\t\tr_standalone.append(standalone.standard_scores_dict(temp_data_obj))\n\t\t\tr_comparative.append(comparative.scikit_all_scores_dict(temp_data_obj,self.current_object))\n\t\t#print(r_standalone)\n\t\t#print(r_comparative)\n\t\tout = {\"rand_indexes\":r_rand_indexes,\"initial_standalone\":r_initial_standalone_dict}\n\t\t#print(r_rand_indexes)\n\t\tout.update(self.compress_dicts(r_standalone))\n\t\tout.update(self.compress_dicts(r_comparative))\n\t\treturn out\n#\"initial_general\":r_initial_general_data_dict\n\n\tdef test_data_set(self,folder_name,n_of_tests,function,function_kwargs):\n\t\tfiles = [folder_name+\"/\"+i for i in list(os.listdir(folder_name))]\n\t\tout =[]\n\t\tfor i in files:\n\t\t\tprint(\"investigating file:\"+i)\n\t\t\tout.append(self.test_data_object(i,n_of_tests,function,function_kwargs))\n\t\treturn out\n\n\tdef compile_data(self,test_data_out):\n\t\tdata = self.compress_dicts(test_data_out)\n\t\tdata[\"initial_standalone\"] = self.compress_dicts(data[\"initial_standalone\"])\n\t\tdata[\"rand_indexes\"] = [i for sublist in data[\"rand_indexes\"] for i in sublist]\n\t\treturn data\n\n\tdef calculate_avg_stdev(self,compiled_data):\n\t\tout={}\n\t\t#print(compiled_data[\"rand_indexes\"])\n\t\tout[\"rand_indexes\"] = list(zip(*compiled_data[\"rand_indexes\"]))\n\t\tout[\"rand_indexes\"] = [statistic.stdev_avg(x) for x in out[\"rand_indexes\"]]\n\t\tprint(out[\"rand_indexes\"])\n\n\n\t\t#out[\"initial_standalone\"] = [statistic.stdev_avg(x) for x in out[\"rand_indexes\"]]\n\t\tfor i in compiled_data:\n\t\t\tif i is not \"rand_indexes\" and i is not 
\"initial_standalone\":\n\t\t\t\tout[i] = [item for sublist in compiled_data[i] for item in sublist]\n\t\t\t\t#print(out[i])\n\t\t\t\tout[i] = statistic.stdev_avg(out[i])\n\t\tout[\"initial_standalone\"] = {k:statistic.stdev_avg(compiled_data[\"initial_standalone\"][k]) for k in compiled_data[\"initial_standalone\"]}\n\t\treturn out;\n\n\n\tdef execute(self,folder_name,n_of_tests,function,params):\n\t\tdata = self.compile_data(self.test_data_set(folder_name,n_of_tests,function,params))\n\t\treturn (data,self.calculate_avg_stdev(data))\n\nimport py_osm_cluster\ndef test_multiple_datasets(main_folder,n_of_tests,function,params):\n\tshort_folders = list(os.listdir(main_folder))\n\t#folders = [main_folder+\"/\"+i for i in list(os.listdir(main_folder))]\n\tgt = GeneralTest()\n\tfor i in short_folders:\n\t\tunexpected= True\n\t\twhile unexpected:\n\t\t\ttry:\n\t\t\t\tfull_folder =main_folder+\"/\"+i\n\t\t\t\tdata = gt.execute(full_folder,n_of_tests,function,params)\n\t\t\t\t#visu.lineplot(data[1][\"rand_indexes\"],i)\n\t\t\t\tplt.plot([ i[1] for i in data[1][\"rand_indexes\"]],label=i)\n\t\t\t\tf = open(\"output_for:\"+i,\"w\")\n\t\t\t\tf.write(str(data[1]))\n\t\t\t\tunexpected = False\n\t\t\texcept ZeroDivisionError:\n\t\t\t\tunexpected = True\n\tplt.xlabel(\"iteration\")\n\tplt.ylabel(\"Rand index\")\n\tplt.legend(loc='lower right')\n\tplt.show()\n",
"import os\nfrom copy import deepcopy\nfrom py_osm_cluster.util.coords import Coords as Coords\nfrom py_osm_cluster.cluster import scikit\nfrom py_osm_cluster.cluster import partitioning\nfrom py_osm_cluster.eval import comparative as comparative\nfrom py_osm_cluster.eval import standalone as standalone\nfrom py_osm_cluster.cluster import hierarchical\nimport py_osm_cluster.util.statistic as statistic\nimport py_osm_cluster.visualisation.visualisation as visu\nimport py_osm_cluster.visualisation.animation as anim\nimport matplotlib.pyplot as plt\n\n\nclass GeneralTest:\n\n def __init__(self):\n self.current_object = None\n self.current_rand_harvest = []\n \"\"\" compresses the list of dictionaries into dictionary of lists\"\"\"\n\n def compress_dicts(self, dictionary_list):\n out = {x: [] for x in dictionary_list[0]}\n for i in dictionary_list:\n for k in out:\n out[k].append(i[k])\n return out\n\n def harvest_rand_index(self, data_obj):\n self.current_rand_harvest.append(comparative.scikit_rand_index(\n data_obj, self.current_object))\n\n def test_data_object(self, file, n_of_tests, function, function_kwargs):\n function_kwargs['on_step'] = self.harvest_rand_index\n data_obj = Coords()\n data_obj.read_file(file)\n self.current_object = data_obj\n r_initial_standalone_dict = standalone.standard_scores_dict(data_obj)\n r_standalone = []\n r_comparative = []\n r_rand_indexes = []\n for i in range(n_of_tests):\n self.current_rand_harvest = []\n temp_data_obj = deepcopy(data_obj)\n temp_data_obj = function(data_obj, **function_kwargs)\n r_rand_indexes.append(self.current_rand_harvest)\n r_standalone.append(standalone.standard_scores_dict(temp_data_obj))\n r_comparative.append(comparative.scikit_all_scores_dict(\n temp_data_obj, self.current_object))\n out = {'rand_indexes': r_rand_indexes, 'initial_standalone':\n r_initial_standalone_dict}\n out.update(self.compress_dicts(r_standalone))\n out.update(self.compress_dicts(r_comparative))\n return out\n\n def test_data_set(self, folder_name, n_of_tests, function, function_kwargs\n ):\n files = [(folder_name + '/' + i) for i in list(os.listdir(folder_name))\n ]\n out = []\n for i in files:\n print('investigating file:' + i)\n out.append(self.test_data_object(i, n_of_tests, function,\n function_kwargs))\n return out\n\n def compile_data(self, test_data_out):\n data = self.compress_dicts(test_data_out)\n data['initial_standalone'] = self.compress_dicts(data[\n 'initial_standalone'])\n data['rand_indexes'] = [i for sublist in data['rand_indexes'] for i in\n sublist]\n return data\n\n def calculate_avg_stdev(self, compiled_data):\n out = {}\n out['rand_indexes'] = list(zip(*compiled_data['rand_indexes']))\n out['rand_indexes'] = [statistic.stdev_avg(x) for x in out[\n 'rand_indexes']]\n print(out['rand_indexes'])\n for i in compiled_data:\n if i is not 'rand_indexes' and i is not 'initial_standalone':\n out[i] = [item for sublist in compiled_data[i] for item in\n sublist]\n out[i] = statistic.stdev_avg(out[i])\n out['initial_standalone'] = {k: statistic.stdev_avg(compiled_data[\n 'initial_standalone'][k]) for k in compiled_data[\n 'initial_standalone']}\n return out\n\n def execute(self, folder_name, n_of_tests, function, params):\n data = self.compile_data(self.test_data_set(folder_name, n_of_tests,\n function, params))\n return data, self.calculate_avg_stdev(data)\n\n\nimport py_osm_cluster\n\n\ndef test_multiple_datasets(main_folder, n_of_tests, function, params):\n short_folders = list(os.listdir(main_folder))\n gt = GeneralTest()\n for i in 
short_folders:\n unexpected = True\n while unexpected:\n try:\n full_folder = main_folder + '/' + i\n data = gt.execute(full_folder, n_of_tests, function, params)\n plt.plot([i[1] for i in data[1]['rand_indexes']], label=i)\n f = open('output_for:' + i, 'w')\n f.write(str(data[1]))\n unexpected = False\n except ZeroDivisionError:\n unexpected = True\n plt.xlabel('iteration')\n plt.ylabel('Rand index')\n plt.legend(loc='lower right')\n plt.show()\n",
"<import token>\n\n\nclass GeneralTest:\n\n def __init__(self):\n self.current_object = None\n self.current_rand_harvest = []\n \"\"\" compresses the list of dictionaries into dictionary of lists\"\"\"\n\n def compress_dicts(self, dictionary_list):\n out = {x: [] for x in dictionary_list[0]}\n for i in dictionary_list:\n for k in out:\n out[k].append(i[k])\n return out\n\n def harvest_rand_index(self, data_obj):\n self.current_rand_harvest.append(comparative.scikit_rand_index(\n data_obj, self.current_object))\n\n def test_data_object(self, file, n_of_tests, function, function_kwargs):\n function_kwargs['on_step'] = self.harvest_rand_index\n data_obj = Coords()\n data_obj.read_file(file)\n self.current_object = data_obj\n r_initial_standalone_dict = standalone.standard_scores_dict(data_obj)\n r_standalone = []\n r_comparative = []\n r_rand_indexes = []\n for i in range(n_of_tests):\n self.current_rand_harvest = []\n temp_data_obj = deepcopy(data_obj)\n temp_data_obj = function(data_obj, **function_kwargs)\n r_rand_indexes.append(self.current_rand_harvest)\n r_standalone.append(standalone.standard_scores_dict(temp_data_obj))\n r_comparative.append(comparative.scikit_all_scores_dict(\n temp_data_obj, self.current_object))\n out = {'rand_indexes': r_rand_indexes, 'initial_standalone':\n r_initial_standalone_dict}\n out.update(self.compress_dicts(r_standalone))\n out.update(self.compress_dicts(r_comparative))\n return out\n\n def test_data_set(self, folder_name, n_of_tests, function, function_kwargs\n ):\n files = [(folder_name + '/' + i) for i in list(os.listdir(folder_name))\n ]\n out = []\n for i in files:\n print('investigating file:' + i)\n out.append(self.test_data_object(i, n_of_tests, function,\n function_kwargs))\n return out\n\n def compile_data(self, test_data_out):\n data = self.compress_dicts(test_data_out)\n data['initial_standalone'] = self.compress_dicts(data[\n 'initial_standalone'])\n data['rand_indexes'] = [i for sublist in data['rand_indexes'] for i in\n sublist]\n return data\n\n def calculate_avg_stdev(self, compiled_data):\n out = {}\n out['rand_indexes'] = list(zip(*compiled_data['rand_indexes']))\n out['rand_indexes'] = [statistic.stdev_avg(x) for x in out[\n 'rand_indexes']]\n print(out['rand_indexes'])\n for i in compiled_data:\n if i is not 'rand_indexes' and i is not 'initial_standalone':\n out[i] = [item for sublist in compiled_data[i] for item in\n sublist]\n out[i] = statistic.stdev_avg(out[i])\n out['initial_standalone'] = {k: statistic.stdev_avg(compiled_data[\n 'initial_standalone'][k]) for k in compiled_data[\n 'initial_standalone']}\n return out\n\n def execute(self, folder_name, n_of_tests, function, params):\n data = self.compile_data(self.test_data_set(folder_name, n_of_tests,\n function, params))\n return data, self.calculate_avg_stdev(data)\n\n\n<import token>\n\n\ndef test_multiple_datasets(main_folder, n_of_tests, function, params):\n short_folders = list(os.listdir(main_folder))\n gt = GeneralTest()\n for i in short_folders:\n unexpected = True\n while unexpected:\n try:\n full_folder = main_folder + '/' + i\n data = gt.execute(full_folder, n_of_tests, function, params)\n plt.plot([i[1] for i in data[1]['rand_indexes']], label=i)\n f = open('output_for:' + i, 'w')\n f.write(str(data[1]))\n unexpected = False\n except ZeroDivisionError:\n unexpected = True\n plt.xlabel('iteration')\n plt.ylabel('Rand index')\n plt.legend(loc='lower right')\n plt.show()\n",
"<import token>\n\n\nclass GeneralTest:\n\n def __init__(self):\n self.current_object = None\n self.current_rand_harvest = []\n \"\"\" compresses the list of dictionaries into dictionary of lists\"\"\"\n\n def compress_dicts(self, dictionary_list):\n out = {x: [] for x in dictionary_list[0]}\n for i in dictionary_list:\n for k in out:\n out[k].append(i[k])\n return out\n\n def harvest_rand_index(self, data_obj):\n self.current_rand_harvest.append(comparative.scikit_rand_index(\n data_obj, self.current_object))\n\n def test_data_object(self, file, n_of_tests, function, function_kwargs):\n function_kwargs['on_step'] = self.harvest_rand_index\n data_obj = Coords()\n data_obj.read_file(file)\n self.current_object = data_obj\n r_initial_standalone_dict = standalone.standard_scores_dict(data_obj)\n r_standalone = []\n r_comparative = []\n r_rand_indexes = []\n for i in range(n_of_tests):\n self.current_rand_harvest = []\n temp_data_obj = deepcopy(data_obj)\n temp_data_obj = function(data_obj, **function_kwargs)\n r_rand_indexes.append(self.current_rand_harvest)\n r_standalone.append(standalone.standard_scores_dict(temp_data_obj))\n r_comparative.append(comparative.scikit_all_scores_dict(\n temp_data_obj, self.current_object))\n out = {'rand_indexes': r_rand_indexes, 'initial_standalone':\n r_initial_standalone_dict}\n out.update(self.compress_dicts(r_standalone))\n out.update(self.compress_dicts(r_comparative))\n return out\n\n def test_data_set(self, folder_name, n_of_tests, function, function_kwargs\n ):\n files = [(folder_name + '/' + i) for i in list(os.listdir(folder_name))\n ]\n out = []\n for i in files:\n print('investigating file:' + i)\n out.append(self.test_data_object(i, n_of_tests, function,\n function_kwargs))\n return out\n\n def compile_data(self, test_data_out):\n data = self.compress_dicts(test_data_out)\n data['initial_standalone'] = self.compress_dicts(data[\n 'initial_standalone'])\n data['rand_indexes'] = [i for sublist in data['rand_indexes'] for i in\n sublist]\n return data\n\n def calculate_avg_stdev(self, compiled_data):\n out = {}\n out['rand_indexes'] = list(zip(*compiled_data['rand_indexes']))\n out['rand_indexes'] = [statistic.stdev_avg(x) for x in out[\n 'rand_indexes']]\n print(out['rand_indexes'])\n for i in compiled_data:\n if i is not 'rand_indexes' and i is not 'initial_standalone':\n out[i] = [item for sublist in compiled_data[i] for item in\n sublist]\n out[i] = statistic.stdev_avg(out[i])\n out['initial_standalone'] = {k: statistic.stdev_avg(compiled_data[\n 'initial_standalone'][k]) for k in compiled_data[\n 'initial_standalone']}\n return out\n\n def execute(self, folder_name, n_of_tests, function, params):\n data = self.compile_data(self.test_data_set(folder_name, n_of_tests,\n function, params))\n return data, self.calculate_avg_stdev(data)\n\n\n<import token>\n<function token>\n",
"<import token>\n\n\nclass GeneralTest:\n\n def __init__(self):\n self.current_object = None\n self.current_rand_harvest = []\n <docstring token>\n\n def compress_dicts(self, dictionary_list):\n out = {x: [] for x in dictionary_list[0]}\n for i in dictionary_list:\n for k in out:\n out[k].append(i[k])\n return out\n\n def harvest_rand_index(self, data_obj):\n self.current_rand_harvest.append(comparative.scikit_rand_index(\n data_obj, self.current_object))\n\n def test_data_object(self, file, n_of_tests, function, function_kwargs):\n function_kwargs['on_step'] = self.harvest_rand_index\n data_obj = Coords()\n data_obj.read_file(file)\n self.current_object = data_obj\n r_initial_standalone_dict = standalone.standard_scores_dict(data_obj)\n r_standalone = []\n r_comparative = []\n r_rand_indexes = []\n for i in range(n_of_tests):\n self.current_rand_harvest = []\n temp_data_obj = deepcopy(data_obj)\n temp_data_obj = function(data_obj, **function_kwargs)\n r_rand_indexes.append(self.current_rand_harvest)\n r_standalone.append(standalone.standard_scores_dict(temp_data_obj))\n r_comparative.append(comparative.scikit_all_scores_dict(\n temp_data_obj, self.current_object))\n out = {'rand_indexes': r_rand_indexes, 'initial_standalone':\n r_initial_standalone_dict}\n out.update(self.compress_dicts(r_standalone))\n out.update(self.compress_dicts(r_comparative))\n return out\n\n def test_data_set(self, folder_name, n_of_tests, function, function_kwargs\n ):\n files = [(folder_name + '/' + i) for i in list(os.listdir(folder_name))\n ]\n out = []\n for i in files:\n print('investigating file:' + i)\n out.append(self.test_data_object(i, n_of_tests, function,\n function_kwargs))\n return out\n\n def compile_data(self, test_data_out):\n data = self.compress_dicts(test_data_out)\n data['initial_standalone'] = self.compress_dicts(data[\n 'initial_standalone'])\n data['rand_indexes'] = [i for sublist in data['rand_indexes'] for i in\n sublist]\n return data\n\n def calculate_avg_stdev(self, compiled_data):\n out = {}\n out['rand_indexes'] = list(zip(*compiled_data['rand_indexes']))\n out['rand_indexes'] = [statistic.stdev_avg(x) for x in out[\n 'rand_indexes']]\n print(out['rand_indexes'])\n for i in compiled_data:\n if i is not 'rand_indexes' and i is not 'initial_standalone':\n out[i] = [item for sublist in compiled_data[i] for item in\n sublist]\n out[i] = statistic.stdev_avg(out[i])\n out['initial_standalone'] = {k: statistic.stdev_avg(compiled_data[\n 'initial_standalone'][k]) for k in compiled_data[\n 'initial_standalone']}\n return out\n\n def execute(self, folder_name, n_of_tests, function, params):\n data = self.compile_data(self.test_data_set(folder_name, n_of_tests,\n function, params))\n return data, self.calculate_avg_stdev(data)\n\n\n<import token>\n<function token>\n",
"<import token>\n\n\nclass GeneralTest:\n\n def __init__(self):\n self.current_object = None\n self.current_rand_harvest = []\n <docstring token>\n\n def compress_dicts(self, dictionary_list):\n out = {x: [] for x in dictionary_list[0]}\n for i in dictionary_list:\n for k in out:\n out[k].append(i[k])\n return out\n\n def harvest_rand_index(self, data_obj):\n self.current_rand_harvest.append(comparative.scikit_rand_index(\n data_obj, self.current_object))\n\n def test_data_object(self, file, n_of_tests, function, function_kwargs):\n function_kwargs['on_step'] = self.harvest_rand_index\n data_obj = Coords()\n data_obj.read_file(file)\n self.current_object = data_obj\n r_initial_standalone_dict = standalone.standard_scores_dict(data_obj)\n r_standalone = []\n r_comparative = []\n r_rand_indexes = []\n for i in range(n_of_tests):\n self.current_rand_harvest = []\n temp_data_obj = deepcopy(data_obj)\n temp_data_obj = function(data_obj, **function_kwargs)\n r_rand_indexes.append(self.current_rand_harvest)\n r_standalone.append(standalone.standard_scores_dict(temp_data_obj))\n r_comparative.append(comparative.scikit_all_scores_dict(\n temp_data_obj, self.current_object))\n out = {'rand_indexes': r_rand_indexes, 'initial_standalone':\n r_initial_standalone_dict}\n out.update(self.compress_dicts(r_standalone))\n out.update(self.compress_dicts(r_comparative))\n return out\n <function token>\n\n def compile_data(self, test_data_out):\n data = self.compress_dicts(test_data_out)\n data['initial_standalone'] = self.compress_dicts(data[\n 'initial_standalone'])\n data['rand_indexes'] = [i for sublist in data['rand_indexes'] for i in\n sublist]\n return data\n\n def calculate_avg_stdev(self, compiled_data):\n out = {}\n out['rand_indexes'] = list(zip(*compiled_data['rand_indexes']))\n out['rand_indexes'] = [statistic.stdev_avg(x) for x in out[\n 'rand_indexes']]\n print(out['rand_indexes'])\n for i in compiled_data:\n if i is not 'rand_indexes' and i is not 'initial_standalone':\n out[i] = [item for sublist in compiled_data[i] for item in\n sublist]\n out[i] = statistic.stdev_avg(out[i])\n out['initial_standalone'] = {k: statistic.stdev_avg(compiled_data[\n 'initial_standalone'][k]) for k in compiled_data[\n 'initial_standalone']}\n return out\n\n def execute(self, folder_name, n_of_tests, function, params):\n data = self.compile_data(self.test_data_set(folder_name, n_of_tests,\n function, params))\n return data, self.calculate_avg_stdev(data)\n\n\n<import token>\n<function token>\n",
"<import token>\n\n\nclass GeneralTest:\n\n def __init__(self):\n self.current_object = None\n self.current_rand_harvest = []\n <docstring token>\n\n def compress_dicts(self, dictionary_list):\n out = {x: [] for x in dictionary_list[0]}\n for i in dictionary_list:\n for k in out:\n out[k].append(i[k])\n return out\n <function token>\n\n def test_data_object(self, file, n_of_tests, function, function_kwargs):\n function_kwargs['on_step'] = self.harvest_rand_index\n data_obj = Coords()\n data_obj.read_file(file)\n self.current_object = data_obj\n r_initial_standalone_dict = standalone.standard_scores_dict(data_obj)\n r_standalone = []\n r_comparative = []\n r_rand_indexes = []\n for i in range(n_of_tests):\n self.current_rand_harvest = []\n temp_data_obj = deepcopy(data_obj)\n temp_data_obj = function(data_obj, **function_kwargs)\n r_rand_indexes.append(self.current_rand_harvest)\n r_standalone.append(standalone.standard_scores_dict(temp_data_obj))\n r_comparative.append(comparative.scikit_all_scores_dict(\n temp_data_obj, self.current_object))\n out = {'rand_indexes': r_rand_indexes, 'initial_standalone':\n r_initial_standalone_dict}\n out.update(self.compress_dicts(r_standalone))\n out.update(self.compress_dicts(r_comparative))\n return out\n <function token>\n\n def compile_data(self, test_data_out):\n data = self.compress_dicts(test_data_out)\n data['initial_standalone'] = self.compress_dicts(data[\n 'initial_standalone'])\n data['rand_indexes'] = [i for sublist in data['rand_indexes'] for i in\n sublist]\n return data\n\n def calculate_avg_stdev(self, compiled_data):\n out = {}\n out['rand_indexes'] = list(zip(*compiled_data['rand_indexes']))\n out['rand_indexes'] = [statistic.stdev_avg(x) for x in out[\n 'rand_indexes']]\n print(out['rand_indexes'])\n for i in compiled_data:\n if i is not 'rand_indexes' and i is not 'initial_standalone':\n out[i] = [item for sublist in compiled_data[i] for item in\n sublist]\n out[i] = statistic.stdev_avg(out[i])\n out['initial_standalone'] = {k: statistic.stdev_avg(compiled_data[\n 'initial_standalone'][k]) for k in compiled_data[\n 'initial_standalone']}\n return out\n\n def execute(self, folder_name, n_of_tests, function, params):\n data = self.compile_data(self.test_data_set(folder_name, n_of_tests,\n function, params))\n return data, self.calculate_avg_stdev(data)\n\n\n<import token>\n<function token>\n",
"<import token>\n\n\nclass GeneralTest:\n\n def __init__(self):\n self.current_object = None\n self.current_rand_harvest = []\n <docstring token>\n <function token>\n <function token>\n\n def test_data_object(self, file, n_of_tests, function, function_kwargs):\n function_kwargs['on_step'] = self.harvest_rand_index\n data_obj = Coords()\n data_obj.read_file(file)\n self.current_object = data_obj\n r_initial_standalone_dict = standalone.standard_scores_dict(data_obj)\n r_standalone = []\n r_comparative = []\n r_rand_indexes = []\n for i in range(n_of_tests):\n self.current_rand_harvest = []\n temp_data_obj = deepcopy(data_obj)\n temp_data_obj = function(data_obj, **function_kwargs)\n r_rand_indexes.append(self.current_rand_harvest)\n r_standalone.append(standalone.standard_scores_dict(temp_data_obj))\n r_comparative.append(comparative.scikit_all_scores_dict(\n temp_data_obj, self.current_object))\n out = {'rand_indexes': r_rand_indexes, 'initial_standalone':\n r_initial_standalone_dict}\n out.update(self.compress_dicts(r_standalone))\n out.update(self.compress_dicts(r_comparative))\n return out\n <function token>\n\n def compile_data(self, test_data_out):\n data = self.compress_dicts(test_data_out)\n data['initial_standalone'] = self.compress_dicts(data[\n 'initial_standalone'])\n data['rand_indexes'] = [i for sublist in data['rand_indexes'] for i in\n sublist]\n return data\n\n def calculate_avg_stdev(self, compiled_data):\n out = {}\n out['rand_indexes'] = list(zip(*compiled_data['rand_indexes']))\n out['rand_indexes'] = [statistic.stdev_avg(x) for x in out[\n 'rand_indexes']]\n print(out['rand_indexes'])\n for i in compiled_data:\n if i is not 'rand_indexes' and i is not 'initial_standalone':\n out[i] = [item for sublist in compiled_data[i] for item in\n sublist]\n out[i] = statistic.stdev_avg(out[i])\n out['initial_standalone'] = {k: statistic.stdev_avg(compiled_data[\n 'initial_standalone'][k]) for k in compiled_data[\n 'initial_standalone']}\n return out\n\n def execute(self, folder_name, n_of_tests, function, params):\n data = self.compile_data(self.test_data_set(folder_name, n_of_tests,\n function, params))\n return data, self.calculate_avg_stdev(data)\n\n\n<import token>\n<function token>\n",
"<import token>\n\n\nclass GeneralTest:\n\n def __init__(self):\n self.current_object = None\n self.current_rand_harvest = []\n <docstring token>\n <function token>\n <function token>\n\n def test_data_object(self, file, n_of_tests, function, function_kwargs):\n function_kwargs['on_step'] = self.harvest_rand_index\n data_obj = Coords()\n data_obj.read_file(file)\n self.current_object = data_obj\n r_initial_standalone_dict = standalone.standard_scores_dict(data_obj)\n r_standalone = []\n r_comparative = []\n r_rand_indexes = []\n for i in range(n_of_tests):\n self.current_rand_harvest = []\n temp_data_obj = deepcopy(data_obj)\n temp_data_obj = function(data_obj, **function_kwargs)\n r_rand_indexes.append(self.current_rand_harvest)\n r_standalone.append(standalone.standard_scores_dict(temp_data_obj))\n r_comparative.append(comparative.scikit_all_scores_dict(\n temp_data_obj, self.current_object))\n out = {'rand_indexes': r_rand_indexes, 'initial_standalone':\n r_initial_standalone_dict}\n out.update(self.compress_dicts(r_standalone))\n out.update(self.compress_dicts(r_comparative))\n return out\n <function token>\n\n def compile_data(self, test_data_out):\n data = self.compress_dicts(test_data_out)\n data['initial_standalone'] = self.compress_dicts(data[\n 'initial_standalone'])\n data['rand_indexes'] = [i for sublist in data['rand_indexes'] for i in\n sublist]\n return data\n\n def calculate_avg_stdev(self, compiled_data):\n out = {}\n out['rand_indexes'] = list(zip(*compiled_data['rand_indexes']))\n out['rand_indexes'] = [statistic.stdev_avg(x) for x in out[\n 'rand_indexes']]\n print(out['rand_indexes'])\n for i in compiled_data:\n if i is not 'rand_indexes' and i is not 'initial_standalone':\n out[i] = [item for sublist in compiled_data[i] for item in\n sublist]\n out[i] = statistic.stdev_avg(out[i])\n out['initial_standalone'] = {k: statistic.stdev_avg(compiled_data[\n 'initial_standalone'][k]) for k in compiled_data[\n 'initial_standalone']}\n return out\n <function token>\n\n\n<import token>\n<function token>\n",
"<import token>\n\n\nclass GeneralTest:\n\n def __init__(self):\n self.current_object = None\n self.current_rand_harvest = []\n <docstring token>\n <function token>\n <function token>\n\n def test_data_object(self, file, n_of_tests, function, function_kwargs):\n function_kwargs['on_step'] = self.harvest_rand_index\n data_obj = Coords()\n data_obj.read_file(file)\n self.current_object = data_obj\n r_initial_standalone_dict = standalone.standard_scores_dict(data_obj)\n r_standalone = []\n r_comparative = []\n r_rand_indexes = []\n for i in range(n_of_tests):\n self.current_rand_harvest = []\n temp_data_obj = deepcopy(data_obj)\n temp_data_obj = function(data_obj, **function_kwargs)\n r_rand_indexes.append(self.current_rand_harvest)\n r_standalone.append(standalone.standard_scores_dict(temp_data_obj))\n r_comparative.append(comparative.scikit_all_scores_dict(\n temp_data_obj, self.current_object))\n out = {'rand_indexes': r_rand_indexes, 'initial_standalone':\n r_initial_standalone_dict}\n out.update(self.compress_dicts(r_standalone))\n out.update(self.compress_dicts(r_comparative))\n return out\n <function token>\n <function token>\n\n def calculate_avg_stdev(self, compiled_data):\n out = {}\n out['rand_indexes'] = list(zip(*compiled_data['rand_indexes']))\n out['rand_indexes'] = [statistic.stdev_avg(x) for x in out[\n 'rand_indexes']]\n print(out['rand_indexes'])\n for i in compiled_data:\n if i is not 'rand_indexes' and i is not 'initial_standalone':\n out[i] = [item for sublist in compiled_data[i] for item in\n sublist]\n out[i] = statistic.stdev_avg(out[i])\n out['initial_standalone'] = {k: statistic.stdev_avg(compiled_data[\n 'initial_standalone'][k]) for k in compiled_data[\n 'initial_standalone']}\n return out\n <function token>\n\n\n<import token>\n<function token>\n",
"<import token>\n\n\nclass GeneralTest:\n\n def __init__(self):\n self.current_object = None\n self.current_rand_harvest = []\n <docstring token>\n <function token>\n <function token>\n\n def test_data_object(self, file, n_of_tests, function, function_kwargs):\n function_kwargs['on_step'] = self.harvest_rand_index\n data_obj = Coords()\n data_obj.read_file(file)\n self.current_object = data_obj\n r_initial_standalone_dict = standalone.standard_scores_dict(data_obj)\n r_standalone = []\n r_comparative = []\n r_rand_indexes = []\n for i in range(n_of_tests):\n self.current_rand_harvest = []\n temp_data_obj = deepcopy(data_obj)\n temp_data_obj = function(data_obj, **function_kwargs)\n r_rand_indexes.append(self.current_rand_harvest)\n r_standalone.append(standalone.standard_scores_dict(temp_data_obj))\n r_comparative.append(comparative.scikit_all_scores_dict(\n temp_data_obj, self.current_object))\n out = {'rand_indexes': r_rand_indexes, 'initial_standalone':\n r_initial_standalone_dict}\n out.update(self.compress_dicts(r_standalone))\n out.update(self.compress_dicts(r_comparative))\n return out\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<import token>\n<function token>\n",
"<import token>\n\n\nclass GeneralTest:\n <function token>\n <docstring token>\n <function token>\n <function token>\n\n def test_data_object(self, file, n_of_tests, function, function_kwargs):\n function_kwargs['on_step'] = self.harvest_rand_index\n data_obj = Coords()\n data_obj.read_file(file)\n self.current_object = data_obj\n r_initial_standalone_dict = standalone.standard_scores_dict(data_obj)\n r_standalone = []\n r_comparative = []\n r_rand_indexes = []\n for i in range(n_of_tests):\n self.current_rand_harvest = []\n temp_data_obj = deepcopy(data_obj)\n temp_data_obj = function(data_obj, **function_kwargs)\n r_rand_indexes.append(self.current_rand_harvest)\n r_standalone.append(standalone.standard_scores_dict(temp_data_obj))\n r_comparative.append(comparative.scikit_all_scores_dict(\n temp_data_obj, self.current_object))\n out = {'rand_indexes': r_rand_indexes, 'initial_standalone':\n r_initial_standalone_dict}\n out.update(self.compress_dicts(r_standalone))\n out.update(self.compress_dicts(r_comparative))\n return out\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<import token>\n<function token>\n",
"<import token>\n\n\nclass GeneralTest:\n <function token>\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<import token>\n<function token>\n",
"<import token>\n<class token>\n<import token>\n<function token>\n"
] | false |
98,669 |
3551af55e37d65ba532676ad1914bca0a0f78cf6
|
# while loop
num = 0
while num < 6:
print(num)
num += 1
# for loops with ranges
for num in range(6):
print(num)
for num in range(0, 6):
print(num)
for num in range(1, 11, 2):
print(num)
for num in range(10, 0, -1):
print(num)
# Looping through a list
nums = [0, 1, 2, 3, 4, 5] # list
for num in nums:
print(num)
# Looping through a tuple
nums = (0, 1, 2, 3, 4, 5) # tuple
for num in nums:
print(num)
# Looping through a dictionary
grades = {'English':97, 'Math':93, 'Art':74, 'Music':86}
for course in grades:
print(course)
for course in grades.keys():
print(course)
for grade in grades.values():
print(grade)
for course, grade in grades.items():
print(f'{course}: {grade}')
for item in grades.items():
course = item[0]
grade = item[1]
print(f'{course}: {grade}')
# break
for num in range(11, 20):
print(num)
if num % 5 == 0:
break
# continue
for num in range(1, 12):
if num % 5 == 0:
continue
print(num)
|
[
"# while loop\nnum = 0\nwhile num < 6:\n print(num)\n num += 1\n\n# for loops with ranges\nfor num in range(6):\n print(num)\n\nfor num in range(0, 6):\n print(num)\n\nfor num in range(1, 11, 2):\n print(num)\n\nfor num in range(10, 0, -1):\n print(num)\n\n# Looping through a list\nnums = [0, 1, 2, 3, 4, 5] # list\nfor num in nums:\n print(num)\n\n# Looping through a tuple\nnums = (0, 1, 2, 3, 4, 5) # tuple\nfor num in nums:\n print(num)\n\n# Looping through a dictionary\ngrades = {'English':97, 'Math':93, 'Art':74, 'Music':86}\n\nfor course in grades:\n print(course)\n\nfor course in grades.keys():\n print(course)\n\nfor grade in grades.values():\n print(grade)\n\nfor course, grade in grades.items():\n print(f'{course}: {grade}')\n\nfor item in grades.items():\n course = item[0]\n grade = item[1]\n print(f'{course}: {grade}')\n\n# break\nfor num in range(11, 20):\n print(num)\n if num % 5 == 0:\n break\n\n# continue\nfor num in range(1, 12):\n if num % 5 == 0:\n continue\n print(num)",
"num = 0\nwhile num < 6:\n print(num)\n num += 1\nfor num in range(6):\n print(num)\nfor num in range(0, 6):\n print(num)\nfor num in range(1, 11, 2):\n print(num)\nfor num in range(10, 0, -1):\n print(num)\nnums = [0, 1, 2, 3, 4, 5]\nfor num in nums:\n print(num)\nnums = 0, 1, 2, 3, 4, 5\nfor num in nums:\n print(num)\ngrades = {'English': 97, 'Math': 93, 'Art': 74, 'Music': 86}\nfor course in grades:\n print(course)\nfor course in grades.keys():\n print(course)\nfor grade in grades.values():\n print(grade)\nfor course, grade in grades.items():\n print(f'{course}: {grade}')\nfor item in grades.items():\n course = item[0]\n grade = item[1]\n print(f'{course}: {grade}')\nfor num in range(11, 20):\n print(num)\n if num % 5 == 0:\n break\nfor num in range(1, 12):\n if num % 5 == 0:\n continue\n print(num)\n",
"<assignment token>\nwhile num < 6:\n print(num)\n num += 1\nfor num in range(6):\n print(num)\nfor num in range(0, 6):\n print(num)\nfor num in range(1, 11, 2):\n print(num)\nfor num in range(10, 0, -1):\n print(num)\n<assignment token>\nfor num in nums:\n print(num)\n<assignment token>\nfor num in nums:\n print(num)\n<assignment token>\nfor course in grades:\n print(course)\nfor course in grades.keys():\n print(course)\nfor grade in grades.values():\n print(grade)\nfor course, grade in grades.items():\n print(f'{course}: {grade}')\nfor item in grades.items():\n course = item[0]\n grade = item[1]\n print(f'{course}: {grade}')\nfor num in range(11, 20):\n print(num)\n if num % 5 == 0:\n break\nfor num in range(1, 12):\n if num % 5 == 0:\n continue\n print(num)\n",
"<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,670 |
a817098038bc7e041db61cf4769c1cdaa15f15cc
|
# Ivan Carvalho
# Solution to https://www.urionlinejudge.com.br/judge/problems/view/1070
# -*- coding: utf-8 -*-
'''
Escreva a sua solução aqui
Code your solution here
Escriba su solución aquí
'''
entrada = int(raw_input())
if entrada % 2 == 0:
entrada += 1
for i in xrange(6):
print entrada + i*2
|
[
"# Ivan Carvalho\n# Solution to https://www.urionlinejudge.com.br/judge/problems/view/1070\n# -*- coding: utf-8 -*-\n\n'''\nEscreva a sua solução aqui\nCode your solution here\nEscriba su solución aquí\n'''\nentrada = int(raw_input())\nif entrada % 2 == 0:\n entrada += 1\nfor i in xrange(6):\n print entrada + i*2\n"
] | true |
98,671 |
b9f4921e8eb4afba634e8308e20bf61c761619ab
|
import RPi.GPIO as GPIO
import time
from flask import Flask, jsonify
def init_gpio_pins():
GPIO.setmode(GPIO.BCM)
pinlist = [2, 3]
for i in pinlist:
GPIO.setup(i, GPIO.OUT)
GPIO.output(i, GPIO.HIGH)
def unlock(duration):
GPIO.output(relay_1, GPIO.LOW)
time.sleep(duration)
GPIO.output(relay_1, GPIO.HIGH)
GPIO.cleanup()
#GPIO pin mappings
relay_1 = 2 #Door strike (normally open)
relay_2 = 3 #Unused
#Set relay close timer
unlock_duration = 5 #Number of seconds to keep door unlocked
#API endpoint to unlock door strike
app = Flask(__name__)
@app.route("/unlock")
def home():
    init_gpio_pins()
    unlock(unlock_duration)  # hold the strike relay low for unlock_duration seconds, then re-lock
    return jsonify({"status":"door was unlocked for 5 seconds"}), 200
if __name__ == "__main__":
app.run(host='0.0.0.0')
|
[
"import RPi.GPIO as GPIO\nimport time\nfrom flask import Flask, jsonify\n\ndef init_gpio_pins():\n GPIO.setmode(GPIO.BCM)\n pinlist = [2, 3]\n for i in pinlist:\n GPIO.setup(i, GPIO.OUT)\n GPIO.output(i, GPIO.HIGH)\n\ndef unlock(duration):\n GPIO.output(relay_1, GPIO.LOW)\n time.sleep(duration)\n GPIO.output(relay_1, GPIO.HIGH)\n GPIO.cleanup()\n\n#GPIO pin mappings\nrelay_1 = 2 #Door strike (normally open)\nrelay_2 = 3 #Unused\n\n#Set relay close timer\nunlock_duration = 5 #Number of seconds to keep door unlocked\n\n#API endpoint to unlock door strike\napp = Flask(__name__)\n\[email protected](\"/unlock\")\ndef home():\n init_gpio_pins()\n return jsonify({\"status\":\"door was unlocked for 5 seconds\"}), 200\n unlock(unlock_duration)\n #return jsonify({\"status\":\"door was unlocked for 5 seconds\"}), 200\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0')",
"import RPi.GPIO as GPIO\nimport time\nfrom flask import Flask, jsonify\n\n\ndef init_gpio_pins():\n GPIO.setmode(GPIO.BCM)\n pinlist = [2, 3]\n for i in pinlist:\n GPIO.setup(i, GPIO.OUT)\n GPIO.output(i, GPIO.HIGH)\n\n\ndef unlock(duration):\n GPIO.output(relay_1, GPIO.LOW)\n time.sleep(duration)\n GPIO.output(relay_1, GPIO.HIGH)\n GPIO.cleanup()\n\n\nrelay_1 = 2\nrelay_2 = 3\nunlock_duration = 5\napp = Flask(__name__)\n\n\[email protected]('/unlock')\ndef home():\n init_gpio_pins()\n return jsonify({'status': 'door was unlocked for 5 seconds'}), 200\n unlock(unlock_duration)\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0')\n",
"<import token>\n\n\ndef init_gpio_pins():\n GPIO.setmode(GPIO.BCM)\n pinlist = [2, 3]\n for i in pinlist:\n GPIO.setup(i, GPIO.OUT)\n GPIO.output(i, GPIO.HIGH)\n\n\ndef unlock(duration):\n GPIO.output(relay_1, GPIO.LOW)\n time.sleep(duration)\n GPIO.output(relay_1, GPIO.HIGH)\n GPIO.cleanup()\n\n\nrelay_1 = 2\nrelay_2 = 3\nunlock_duration = 5\napp = Flask(__name__)\n\n\[email protected]('/unlock')\ndef home():\n init_gpio_pins()\n return jsonify({'status': 'door was unlocked for 5 seconds'}), 200\n unlock(unlock_duration)\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0')\n",
"<import token>\n\n\ndef init_gpio_pins():\n GPIO.setmode(GPIO.BCM)\n pinlist = [2, 3]\n for i in pinlist:\n GPIO.setup(i, GPIO.OUT)\n GPIO.output(i, GPIO.HIGH)\n\n\ndef unlock(duration):\n GPIO.output(relay_1, GPIO.LOW)\n time.sleep(duration)\n GPIO.output(relay_1, GPIO.HIGH)\n GPIO.cleanup()\n\n\n<assignment token>\n\n\[email protected]('/unlock')\ndef home():\n init_gpio_pins()\n return jsonify({'status': 'door was unlocked for 5 seconds'}), 200\n unlock(unlock_duration)\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0')\n",
"<import token>\n\n\ndef init_gpio_pins():\n GPIO.setmode(GPIO.BCM)\n pinlist = [2, 3]\n for i in pinlist:\n GPIO.setup(i, GPIO.OUT)\n GPIO.output(i, GPIO.HIGH)\n\n\ndef unlock(duration):\n GPIO.output(relay_1, GPIO.LOW)\n time.sleep(duration)\n GPIO.output(relay_1, GPIO.HIGH)\n GPIO.cleanup()\n\n\n<assignment token>\n\n\[email protected]('/unlock')\ndef home():\n init_gpio_pins()\n return jsonify({'status': 'door was unlocked for 5 seconds'}), 200\n unlock(unlock_duration)\n\n\n<code token>\n",
"<import token>\n<function token>\n\n\ndef unlock(duration):\n GPIO.output(relay_1, GPIO.LOW)\n time.sleep(duration)\n GPIO.output(relay_1, GPIO.HIGH)\n GPIO.cleanup()\n\n\n<assignment token>\n\n\[email protected]('/unlock')\ndef home():\n init_gpio_pins()\n return jsonify({'status': 'door was unlocked for 5 seconds'}), 200\n unlock(unlock_duration)\n\n\n<code token>\n",
"<import token>\n<function token>\n\n\ndef unlock(duration):\n GPIO.output(relay_1, GPIO.LOW)\n time.sleep(duration)\n GPIO.output(relay_1, GPIO.HIGH)\n GPIO.cleanup()\n\n\n<assignment token>\n<function token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<code token>\n"
] | false |
98,672 |
b0d82fac4e6c12ccb6258411b96c3f2f929f9bd2
|
import gui
gui.start_gui()
|
[
"import gui\n\n\ngui.start_gui()\n\n\n",
"import gui\ngui.start_gui()\n",
"<import token>\ngui.start_gui()\n",
"<import token>\n<code token>\n"
] | false |
98,673 |
8a5938ab84f6aad68fefe992776da1573ae6fd2f
|
from .configure import Configure
from .create_user import CreateUser
from .list_routes import ListRoutes
from .load_nanopub import LoadNanopub
from .retire_nanopub import RetireNanopub
from .run_interpreter import RunInterpreter
from .runserver import WhyisServer
from .test import Test
from .test_agent import TestAgent
from .update_user import UpdateUser
from .uninstall_app import UninstallApp
|
[
"from .configure import Configure\nfrom .create_user import CreateUser\nfrom .list_routes import ListRoutes\nfrom .load_nanopub import LoadNanopub\nfrom .retire_nanopub import RetireNanopub\nfrom .run_interpreter import RunInterpreter\nfrom .runserver import WhyisServer\nfrom .test import Test\nfrom .test_agent import TestAgent\nfrom .update_user import UpdateUser\nfrom .uninstall_app import UninstallApp\n",
"<import token>\n"
] | false |
98,674 |
516e44bef5354265ab42fb6b513096f96e17d3fe
|
'''
算法:模拟。需要借用同nums1相同大小的数组。
'''
class Solution:
def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:
"""
Do not return anything, modify nums1 in-place instead.
"""
nums3 = nums1[:m]
i = 0
j = 0
k = 0
while i < m and j < n:
if nums3[i] < nums2[j]:
nums1[k] = nums3[i]
i += 1
else:
nums1[k] = nums2[j]
j += 1
k += 1
for v in range(i, m):
nums1[k] = nums3[v]
k += 1
for v in range(j, n):
nums1[k] = nums2[v]
k += 1
|
[
"'''\n算法:模拟。需要借用同nums1相同大小的数组。\n'''\nclass Solution:\n def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:\n \"\"\"\n Do not return anything, modify nums1 in-place instead.\n \"\"\"\n nums3 = nums1[:m]\n i = 0\n j = 0\n k = 0\n while i < m and j < n:\n if nums3[i] < nums2[j]:\n nums1[k] = nums3[i]\n i += 1\n else:\n nums1[k] = nums2[j]\n j += 1\n k += 1\n \n for v in range(i, m):\n nums1[k] = nums3[v]\n k += 1\n \n for v in range(j, n):\n nums1[k] = nums2[v]\n k += 1",
"<docstring token>\n\n\nclass Solution:\n\n def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) ->None:\n \"\"\"\n Do not return anything, modify nums1 in-place instead.\n \"\"\"\n nums3 = nums1[:m]\n i = 0\n j = 0\n k = 0\n while i < m and j < n:\n if nums3[i] < nums2[j]:\n nums1[k] = nums3[i]\n i += 1\n else:\n nums1[k] = nums2[j]\n j += 1\n k += 1\n for v in range(i, m):\n nums1[k] = nums3[v]\n k += 1\n for v in range(j, n):\n nums1[k] = nums2[v]\n k += 1\n",
"<docstring token>\n\n\nclass Solution:\n <function token>\n",
"<docstring token>\n<class token>\n"
] | false |
98,675 |
d563827e534e8959c222e71d39892c8b5933ee16
|
import numpy as np
from models.Particle import Particle
# Create a particle to model a cannonball
cannonball = Particle(
'cannonball', 3, velocity=np.array([10, 5, 3], dtype=float), position=np.array([0, 0, 0], dtype=float))
# Set delta_t to 0.01 seconds for Euler approximations
delta_t = 0.01
# create a list to record position over time
trajectory = [cannonball.position]
# run the simulation
while (cannonball.position[2] >= 0):
cannonball.update(delta_t, "EULER")
trajectory.append(cannonball.position)
summary = '{0} travelled {1}m.'.format(cannonball.name, cannonball.position[0])
print(summary)
print('Attempting to write to file')
file_contents = np.asarray(trajectory)
np.savetxt('trajectory.csv', file_contents, delimiter=',')
print('File saved successfully')
|
[
"import numpy as np\nfrom models.Particle import Particle\n\n# Create a particle to model a cannonball\ncannonball = Particle(\n 'cannonball', 3, velocity=np.array([10, 5, 3], dtype=float), position=np.array([0, 0, 0], dtype=float))\n\n# Set delta_t to 0.01 seconds for Euler approximations\ndelta_t = 0.01\n\n# create a list to record position over time\ntrajectory = [cannonball.position]\n\n# run the simulation\nwhile (cannonball.position[2] >= 0):\n cannonball.update(delta_t, \"EULER\")\n trajectory.append(cannonball.position)\n\n\nsummary = '{0} travelled {1}m.'.format(cannonball.name, cannonball.position[0])\n\nprint(summary)\nprint('Attempting to write to file')\n\nfile_contents = np.asarray(trajectory)\nnp.savetxt('trajectory.csv', file_contents, delimiter=',')\n\nprint('File saved successfully')\n",
"import numpy as np\nfrom models.Particle import Particle\ncannonball = Particle('cannonball', 3, velocity=np.array([10, 5, 3], dtype=\n float), position=np.array([0, 0, 0], dtype=float))\ndelta_t = 0.01\ntrajectory = [cannonball.position]\nwhile cannonball.position[2] >= 0:\n cannonball.update(delta_t, 'EULER')\n trajectory.append(cannonball.position)\nsummary = '{0} travelled {1}m.'.format(cannonball.name, cannonball.position[0])\nprint(summary)\nprint('Attempting to write to file')\nfile_contents = np.asarray(trajectory)\nnp.savetxt('trajectory.csv', file_contents, delimiter=',')\nprint('File saved successfully')\n",
"<import token>\ncannonball = Particle('cannonball', 3, velocity=np.array([10, 5, 3], dtype=\n float), position=np.array([0, 0, 0], dtype=float))\ndelta_t = 0.01\ntrajectory = [cannonball.position]\nwhile cannonball.position[2] >= 0:\n cannonball.update(delta_t, 'EULER')\n trajectory.append(cannonball.position)\nsummary = '{0} travelled {1}m.'.format(cannonball.name, cannonball.position[0])\nprint(summary)\nprint('Attempting to write to file')\nfile_contents = np.asarray(trajectory)\nnp.savetxt('trajectory.csv', file_contents, delimiter=',')\nprint('File saved successfully')\n",
"<import token>\n<assignment token>\nwhile cannonball.position[2] >= 0:\n cannonball.update(delta_t, 'EULER')\n trajectory.append(cannonball.position)\n<assignment token>\nprint(summary)\nprint('Attempting to write to file')\n<assignment token>\nnp.savetxt('trajectory.csv', file_contents, delimiter=',')\nprint('File saved successfully')\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,676 |
348f15f6f5334e6006585dbfa3f692db1d77ca14
|
'''
Based on:
Generating p(e|m) index
https://github.com/informagi/REL/blob/master/scripts/code_tutorials/generate_p_e_m.py
'''
from REL.wikipedia import Wikipedia
from REL.wikipedia_yago_freq import WikipediaYagoFreq
import json
wiki_version = "wiki_2019"
base_url = "/home/hvwesten/Projects/thesis/data/"
# Open file with clueweb counts, choose between ClueWeb09 or ClueWeb09+12
input_url = './save_folder/00_clueweb_full_9.json'
# input_url = './save_folder/00_clueweb_full_9_12.json'
print(f"input_url: {input_url}")
with open(input_url, 'r') as f:
clueweb_dict = json.load(f)
# Import helper functions; store p(e|m) index etc in class.
print("Loading wikipedia files")
wikipedia = Wikipedia(base_url, wiki_version)
# Init class
wiki_yago_freq = WikipediaYagoFreq(base_url, wiki_version, wikipedia)
# All the different configurations, uncomment the one you want to use
'''
Baseline:
Compute Wiki+Crosswiki with YAGO p(e|m)
'''
wiki_yago_freq.compute_wiki()
wiki_yago_freq.compute_custom()
'''
# 1. W + CW + C9 (+ C12):
Compute Wiki+Crosswiki with Clueweb p(e|m)
'''
# wiki_yago_freq.compute_wiki()
# wiki_yago_freq.compute_custom(clueweb_dict)
'''
# 2. W + C9 (+ C12) + Y:
Compute Wiki+Clueweb and YAGO p(e|m)
'''
# wiki_yago_freq.compute_wiki(custom_add=clueweb_dict)
# wiki_yago_freq.compute_custom()
'''
# 3. W + CW + C9 (+ C12) + Y :
Compute WIKI + CROSSWIKI + CLUE and YAGO (= ALL)
'''
# wiki_yago_freq.compute_wiki(special_case="all", custom_main=clueweb_dict)
# wiki_yago_freq.compute_custom()
'''
# 4. CW + C9 (+ C12) + Y:
Compute Cross+Clueweb and YAGO p(e|m)
'''
# wiki_yago_freq.compute_wiki(custom_main=clueweb_dict)
# wiki_yago_freq.compute_custom()
'''
# 5. CW + C9 (+ C12) :
Compute Cross and Clue
'''
# wiki_yago_freq.compute_wiki(special_case="only_crosswiki")
# wiki_yago_freq.compute_custom(clueweb_dict)
'''
# 6. C9 (+ C12) + Y:
Just Clueweb9_12+YAGO or Clueweb9+YAGO
'''
# wiki_yago_freq.compute_wiki(special_case="only_clueweb", custom_main=clueweb_dict)
# wiki_yago_freq.compute_custom()
'''
# 7. C9 (+ C12):
Just Clueweb9_12 or ClueWeb9
'''
# wiki_yago_freq.compute_wiki(special_case="only_clueweb", custom_main=clueweb_dict)
# Store dictionary in sqlite3 database
wiki_yago_freq.store()
|
[
"'''\n Based on:\n Generating p(e|m) index\n https://github.com/informagi/REL/blob/master/scripts/code_tutorials/generate_p_e_m.py\n\n'''\n\nfrom REL.wikipedia import Wikipedia\nfrom REL.wikipedia_yago_freq import WikipediaYagoFreq\n\nimport json\n\nwiki_version = \"wiki_2019\"\nbase_url = \"/home/hvwesten/Projects/thesis/data/\"\n\n# Open file with clueweb counts, choose between ClueWeb09 or ClueWeb09+12\ninput_url = './save_folder/00_clueweb_full_9.json'\n# input_url = './save_folder/00_clueweb_full_9_12.json'\n\nprint(f\"input_url: {input_url}\")\n\nwith open(input_url, 'r') as f:\n clueweb_dict = json.load(f)\n\n# Import helper functions; store p(e|m) index etc in class.\nprint(\"Loading wikipedia files\")\nwikipedia = Wikipedia(base_url, wiki_version)\n\n# Init class\nwiki_yago_freq = WikipediaYagoFreq(base_url, wiki_version, wikipedia)\n\n\n# All the different configurations, uncomment the one you want to use\n'''\nBaseline:\n Compute Wiki+Crosswiki with YAGO p(e|m)\n'''\nwiki_yago_freq.compute_wiki()\nwiki_yago_freq.compute_custom()\n\n'''\n# 1. W + CW + C9 (+ C12):\n Compute Wiki+Crosswiki with Clueweb p(e|m)\n'''\n# wiki_yago_freq.compute_wiki()\n# wiki_yago_freq.compute_custom(clueweb_dict)\n\n'''\n# 2. W + C9 (+ C12) + Y:\n Compute Wiki+Clueweb and YAGO p(e|m)\n'''\n# wiki_yago_freq.compute_wiki(custom_add=clueweb_dict)\n# wiki_yago_freq.compute_custom()\n\n'''\n# 3. W + CW + C9 (+ C12) + Y :\n Compute WIKI + CROSSWIKI + CLUE and YAGO (= ALL)\n'''\n# wiki_yago_freq.compute_wiki(special_case=\"all\", custom_main=clueweb_dict)\n# wiki_yago_freq.compute_custom()\n\n\n'''\n# 4. CW + C9 (+ C12) + Y:\n Compute Cross+Clueweb and YAGO p(e|m)\n'''\n# wiki_yago_freq.compute_wiki(custom_main=clueweb_dict)\n# wiki_yago_freq.compute_custom()\n\n\n'''\n# 5. CW + C9 (+ C12) :\n Compute Cross and Clue\n'''\n# wiki_yago_freq.compute_wiki(special_case=\"only_crosswiki\")\n# wiki_yago_freq.compute_custom(clueweb_dict)\n\n\n'''\n# 6. C9 (+ C12) + Y:\n Just Clueweb9_12+YAGO or Clueweb9+YAGO\n'''\n# wiki_yago_freq.compute_wiki(special_case=\"only_clueweb\", custom_main=clueweb_dict)\n# wiki_yago_freq.compute_custom()\n\n\n'''\n# 7. C9 (+ C12):\n Just Clueweb9_12 or ClueWeb9\n'''\n# wiki_yago_freq.compute_wiki(special_case=\"only_clueweb\", custom_main=clueweb_dict)\n\n\n# Store dictionary in sqlite3 database\nwiki_yago_freq.store()",
"<docstring token>\nfrom REL.wikipedia import Wikipedia\nfrom REL.wikipedia_yago_freq import WikipediaYagoFreq\nimport json\nwiki_version = 'wiki_2019'\nbase_url = '/home/hvwesten/Projects/thesis/data/'\ninput_url = './save_folder/00_clueweb_full_9.json'\nprint(f'input_url: {input_url}')\nwith open(input_url, 'r') as f:\n clueweb_dict = json.load(f)\nprint('Loading wikipedia files')\nwikipedia = Wikipedia(base_url, wiki_version)\nwiki_yago_freq = WikipediaYagoFreq(base_url, wiki_version, wikipedia)\n<docstring token>\nwiki_yago_freq.compute_wiki()\nwiki_yago_freq.compute_custom()\n<docstring token>\nwiki_yago_freq.store()\n",
"<docstring token>\n<import token>\nwiki_version = 'wiki_2019'\nbase_url = '/home/hvwesten/Projects/thesis/data/'\ninput_url = './save_folder/00_clueweb_full_9.json'\nprint(f'input_url: {input_url}')\nwith open(input_url, 'r') as f:\n clueweb_dict = json.load(f)\nprint('Loading wikipedia files')\nwikipedia = Wikipedia(base_url, wiki_version)\nwiki_yago_freq = WikipediaYagoFreq(base_url, wiki_version, wikipedia)\n<docstring token>\nwiki_yago_freq.compute_wiki()\nwiki_yago_freq.compute_custom()\n<docstring token>\nwiki_yago_freq.store()\n",
"<docstring token>\n<import token>\n<assignment token>\nprint(f'input_url: {input_url}')\nwith open(input_url, 'r') as f:\n clueweb_dict = json.load(f)\nprint('Loading wikipedia files')\n<assignment token>\n<docstring token>\nwiki_yago_freq.compute_wiki()\nwiki_yago_freq.compute_custom()\n<docstring token>\nwiki_yago_freq.store()\n",
"<docstring token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<docstring token>\n<code token>\n<docstring token>\n<code token>\n"
] | false |
98,677 |
6d90cb43b14c291973161edf74a2939a08633d51
|
class Editor:
def __init__(self, gridSize):
self.gridSize = gridSize
def draw(self, surface):
pass
|
[
"\n\nclass Editor:\n\n def __init__(self, gridSize):\n self.gridSize = gridSize\n\n def draw(self, surface):\n pass",
"class Editor:\n\n def __init__(self, gridSize):\n self.gridSize = gridSize\n\n def draw(self, surface):\n pass\n",
"class Editor:\n\n def __init__(self, gridSize):\n self.gridSize = gridSize\n <function token>\n",
"class Editor:\n <function token>\n <function token>\n",
"<class token>\n"
] | false |
98,678 |
1c9d4f16e43c2fb15dbcc97244a4818d83aaaca9
|
from unittest.mock import MagicMock
import pytest
from pydantic import ValidationError
from ..common.model import WebsiteCheckResult
def test_send_and_receive(kafka_sender, kafka_receiver, check_result):
kafka_sender.send(check_result)
data = kafka_receiver.pop()
assert isinstance(data, WebsiteCheckResult), 'Invalid data type received'
assert data == check_result, 'Received data should be the same as sent'
def test_send_invalid_data(kafka_sender):
data = 'test'
with pytest.raises(AttributeError) as e:
kafka_sender.send(data)
assert f"'str' object has no attribute 'to_bytes'" in str(e.value)
def test_receive_invalid_data(kafka_sender, kafka_receiver):
data = MagicMock()
data.to_bytes = lambda: str.encode('utf-8')
kafka_sender.send(data)
with pytest.raises(ValidationError) as e:
kafka_receiver.pop()
assert 'validation error for WebsiteCheckResult' in str(e.value)
|
[
"from unittest.mock import MagicMock\n\nimport pytest\nfrom pydantic import ValidationError\n\nfrom ..common.model import WebsiteCheckResult\n\n\ndef test_send_and_receive(kafka_sender, kafka_receiver, check_result):\n kafka_sender.send(check_result)\n data = kafka_receiver.pop()\n assert isinstance(data, WebsiteCheckResult), 'Invalid data type received'\n assert data == check_result, 'Received data should be the same as sent'\n\n\ndef test_send_invalid_data(kafka_sender):\n data = 'test'\n with pytest.raises(AttributeError) as e:\n kafka_sender.send(data)\n\n assert f\"'str' object has no attribute 'to_bytes'\" in str(e.value)\n\n\ndef test_receive_invalid_data(kafka_sender, kafka_receiver):\n data = MagicMock()\n data.to_bytes = lambda: str.encode('utf-8')\n kafka_sender.send(data)\n\n with pytest.raises(ValidationError) as e:\n kafka_receiver.pop()\n\n assert 'validation error for WebsiteCheckResult' in str(e.value)\n\n\n",
"from unittest.mock import MagicMock\nimport pytest\nfrom pydantic import ValidationError\nfrom ..common.model import WebsiteCheckResult\n\n\ndef test_send_and_receive(kafka_sender, kafka_receiver, check_result):\n kafka_sender.send(check_result)\n data = kafka_receiver.pop()\n assert isinstance(data, WebsiteCheckResult), 'Invalid data type received'\n assert data == check_result, 'Received data should be the same as sent'\n\n\ndef test_send_invalid_data(kafka_sender):\n data = 'test'\n with pytest.raises(AttributeError) as e:\n kafka_sender.send(data)\n assert f\"'str' object has no attribute 'to_bytes'\" in str(e.value)\n\n\ndef test_receive_invalid_data(kafka_sender, kafka_receiver):\n data = MagicMock()\n data.to_bytes = lambda : str.encode('utf-8')\n kafka_sender.send(data)\n with pytest.raises(ValidationError) as e:\n kafka_receiver.pop()\n assert 'validation error for WebsiteCheckResult' in str(e.value)\n",
"<import token>\n\n\ndef test_send_and_receive(kafka_sender, kafka_receiver, check_result):\n kafka_sender.send(check_result)\n data = kafka_receiver.pop()\n assert isinstance(data, WebsiteCheckResult), 'Invalid data type received'\n assert data == check_result, 'Received data should be the same as sent'\n\n\ndef test_send_invalid_data(kafka_sender):\n data = 'test'\n with pytest.raises(AttributeError) as e:\n kafka_sender.send(data)\n assert f\"'str' object has no attribute 'to_bytes'\" in str(e.value)\n\n\ndef test_receive_invalid_data(kafka_sender, kafka_receiver):\n data = MagicMock()\n data.to_bytes = lambda : str.encode('utf-8')\n kafka_sender.send(data)\n with pytest.raises(ValidationError) as e:\n kafka_receiver.pop()\n assert 'validation error for WebsiteCheckResult' in str(e.value)\n",
"<import token>\n\n\ndef test_send_and_receive(kafka_sender, kafka_receiver, check_result):\n kafka_sender.send(check_result)\n data = kafka_receiver.pop()\n assert isinstance(data, WebsiteCheckResult), 'Invalid data type received'\n assert data == check_result, 'Received data should be the same as sent'\n\n\ndef test_send_invalid_data(kafka_sender):\n data = 'test'\n with pytest.raises(AttributeError) as e:\n kafka_sender.send(data)\n assert f\"'str' object has no attribute 'to_bytes'\" in str(e.value)\n\n\n<function token>\n",
"<import token>\n\n\ndef test_send_and_receive(kafka_sender, kafka_receiver, check_result):\n kafka_sender.send(check_result)\n data = kafka_receiver.pop()\n assert isinstance(data, WebsiteCheckResult), 'Invalid data type received'\n assert data == check_result, 'Received data should be the same as sent'\n\n\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n"
] | false |
98,679 |
0025a95d04ed027f4a4318a925f168d6192b042e
|
from pygame.sprite import Sprite
from nlc_dino_runner.utils.constants import SCREEN_WIDTH
# Clase padre
class Obstacles(Sprite):
def __init__(self, image, obstacle_type):
self.image = image
self.obstacle_type = obstacle_type
self.rect = self.image[self.obstacle_type].get_rect() #retorna una tupla(x,y)
self.rect.x = SCREEN_WIDTH #1100
def update(self, game_speed, obstacles_list):
self.rect.x -= game_speed
if self.rect.x < -self.rect.width:
obstacles_list.pop()
def draw(self, screen):
screen.blit(self.image[self.obstacle_type], self.rect)
|
[
"from pygame.sprite import Sprite\n\nfrom nlc_dino_runner.utils.constants import SCREEN_WIDTH\n# Clase padre\n\nclass Obstacles(Sprite):\n\n def __init__(self, image, obstacle_type):\n self.image = image\n self.obstacle_type = obstacle_type\n self.rect = self.image[self.obstacle_type].get_rect() #retorna una tupla(x,y)\n self.rect.x = SCREEN_WIDTH #1100\n\n def update(self, game_speed, obstacles_list):\n self.rect.x -= game_speed\n if self.rect.x < -self.rect.width:\n obstacles_list.pop()\n\n def draw(self, screen):\n screen.blit(self.image[self.obstacle_type], self.rect)\n",
"from pygame.sprite import Sprite\nfrom nlc_dino_runner.utils.constants import SCREEN_WIDTH\n\n\nclass Obstacles(Sprite):\n\n def __init__(self, image, obstacle_type):\n self.image = image\n self.obstacle_type = obstacle_type\n self.rect = self.image[self.obstacle_type].get_rect()\n self.rect.x = SCREEN_WIDTH\n\n def update(self, game_speed, obstacles_list):\n self.rect.x -= game_speed\n if self.rect.x < -self.rect.width:\n obstacles_list.pop()\n\n def draw(self, screen):\n screen.blit(self.image[self.obstacle_type], self.rect)\n",
"<import token>\n\n\nclass Obstacles(Sprite):\n\n def __init__(self, image, obstacle_type):\n self.image = image\n self.obstacle_type = obstacle_type\n self.rect = self.image[self.obstacle_type].get_rect()\n self.rect.x = SCREEN_WIDTH\n\n def update(self, game_speed, obstacles_list):\n self.rect.x -= game_speed\n if self.rect.x < -self.rect.width:\n obstacles_list.pop()\n\n def draw(self, screen):\n screen.blit(self.image[self.obstacle_type], self.rect)\n",
"<import token>\n\n\nclass Obstacles(Sprite):\n\n def __init__(self, image, obstacle_type):\n self.image = image\n self.obstacle_type = obstacle_type\n self.rect = self.image[self.obstacle_type].get_rect()\n self.rect.x = SCREEN_WIDTH\n\n def update(self, game_speed, obstacles_list):\n self.rect.x -= game_speed\n if self.rect.x < -self.rect.width:\n obstacles_list.pop()\n <function token>\n",
"<import token>\n\n\nclass Obstacles(Sprite):\n\n def __init__(self, image, obstacle_type):\n self.image = image\n self.obstacle_type = obstacle_type\n self.rect = self.image[self.obstacle_type].get_rect()\n self.rect.x = SCREEN_WIDTH\n <function token>\n <function token>\n",
"<import token>\n\n\nclass Obstacles(Sprite):\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
98,680 |
feb2212fb16c294d0b1fd124668d300ceb505f68
|
from .readData import readData
from .readMarvel import readMarvel
|
[
"from .readData import readData\nfrom .readMarvel import readMarvel",
"from .readData import readData\nfrom .readMarvel import readMarvel\n",
"<import token>\n"
] | false |
98,681 |
76fe2f541f16d73b2bb80c9b236af78dc1dad668
|
#!/usr/bin/python
import yaml
class Milgram:
def __init__(self, graph, target):
self.graph = graph
self.target = target
self.total_distance = 0
self.made_it = 0
def bfs(self, start):
visited, queue = set(), [start]
while queue:
vertex = queue.pop(0)
if vertex not in visited:
visited.add(vertex)
if vertex in self.graph:
queue.extend(set(self.graph[vertex]) - visited)
return visited
def create_matrix(self):
return [[0 for y in range(len(self.graph))] for x in range(len(self.graph))]
def bfs_paths(self, start, goal):
queue = [(start, [start])]
while queue:
(vertex, path) = queue.pop(0)
if vertex in self.graph:
for next in set(self.graph[vertex]) - set(path):
if next == goal:
yield path + [next]
else:
queue.append((next, path + [next]))
def shortest_path(self, start, goal):
try:
return next(self.bfs_paths(start, goal))
except StopIteration:
return None
def run(self):
for page in self.graph:
print page
distance = self.shortest_path(page, self.target)
if distance is not None:
self.total_distance += len(distance) - 1
self.made_it += 1
return float(self.total_distance) / float(self.made_it)
if __name__ == '__main__':
# f = open('wiki.json', 'r')
#
# j = yaml.load(f.read())
#
# print('Loaded the graph')
#
# milgram = Milgram(j, 112875) # USA
#
# print milgram.run()
milgram = Milgram({1: [2, 3], 2: [4], 99: [55]}, 4)
print milgram.create_matrix()
|
[
"#!/usr/bin/python\n\nimport yaml\n\n\nclass Milgram:\n def __init__(self, graph, target):\n self.graph = graph\n self.target = target\n self.total_distance = 0\n self.made_it = 0\n\n def bfs(self, start):\n visited, queue = set(), [start]\n while queue:\n vertex = queue.pop(0)\n if vertex not in visited:\n visited.add(vertex)\n if vertex in self.graph:\n queue.extend(set(self.graph[vertex]) - visited)\n return visited\n\n def create_matrix(self):\n return [[0 for y in range(len(self.graph))] for x in range(len(self.graph))]\n\n def bfs_paths(self, start, goal):\n queue = [(start, [start])]\n while queue:\n (vertex, path) = queue.pop(0)\n if vertex in self.graph:\n for next in set(self.graph[vertex]) - set(path):\n if next == goal:\n yield path + [next]\n else:\n queue.append((next, path + [next]))\n\n def shortest_path(self, start, goal):\n try:\n return next(self.bfs_paths(start, goal))\n except StopIteration:\n return None\n\n def run(self):\n for page in self.graph:\n print page\n distance = self.shortest_path(page, self.target)\n\n if distance is not None:\n self.total_distance += len(distance) - 1\n self.made_it += 1\n\n return float(self.total_distance) / float(self.made_it)\n\n\nif __name__ == '__main__':\n # f = open('wiki.json', 'r')\n #\n # j = yaml.load(f.read())\n #\n # print('Loaded the graph')\n #\n # milgram = Milgram(j, 112875) # USA\n #\n # print milgram.run()\n\n milgram = Milgram({1: [2, 3], 2: [4], 99: [55]}, 4)\n print milgram.create_matrix()\n"
] | true |
98,682 |
0a7371233f0f15366a51fdfafd57ba8d824cf007
|
"""
Test cases for micro-grid systems
The following systems are considered
1. AC micro-grid
2. DC micro-grid
3. Hybrid AC/DC micro-grid
"""
|
[
"\"\"\"\nTest cases for micro-grid systems\nThe following systems are considered\n1. AC micro-grid\n2. DC micro-grid\n3. Hybrid AC/DC micro-grid\n\"\"\"\n",
"<docstring token>\n"
] | false |
98,683 |
bfe53a6c20df5486579eec55cccfab0693658552
|
import pymongo
import pandas as pd
import datetime
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
DBserver = pymongo.MongoClient('mongodb://localhost:27017/')
DB = DBserver['ir']
collection = DB['usermsgs']
quiz_set_four = {1, 2, 4, 5}
quiz_set_six = {1, 2, 3, 4, 5, 6}
name_set = set(sorted([int(item['username']) for item in collection.find()]))
# print(len(name_set))
# print(name_set)
quiz_item = []
for name in name_set:
class_flag = -1
cut_flag = 0
temp_set = set()
temp_item = []
for item in collection.find({'username': f"{str(name)}"}):
if class_flag != item['quiz_class']:
if not len(quiz_set_four.symmetric_difference(temp_set)):
# print("SET class flag")
cut_flag = 1
elif not len(quiz_set_six.symmetric_difference(temp_set)):
# print("SET class flag")
cut_flag = 1
if item['quiz_class'] == 3 or item['quiz_class'] == 6:
temp_set.add(item['quiz_class'])
class_flag = item['quiz_class']
continue
if cut_flag == 1:
# print("CUT")
temp_set.clear()
cut_flag = 0
# input("pause")
temp_set.add(item['quiz_class'])
class_flag = item['quiz_class']
temp_item.append(item)
quiz_item.append(temp_item)
# print("End")
# input("pause")
time_slot = []
for ind, item in enumerate(quiz_item):
if len(time_slot) == 0:
time_slot.append([ind])
continue
flag=1
for j, current_time in enumerate(time_slot):
if quiz_item[current_time[0]][0]['Msgdate'] - datetime.timedelta(minutes=8) <= item[0]['Msgdate'] <= quiz_item[current_time[0]][0]['Msgdate'] + datetime.timedelta(minutes=8):
time_slot[j].append(ind)
flag = 0
break
if flag:
time_slot.append([ind])
# df = pd.DataFrame(columns=['name', 'time_slot', 'mode', 'type', 'quiz_number', 'text'])
output = []
simple_stack = []
for ind, item in enumerate(time_slot):
for i in sorted(item):
simple_stack.append(i)
if len(simple_stack)>1:
if simple_stack[-1]%2-1 == simple_stack[-2]%2:
temp_output=quiz_item[i]
temp_output+=quiz_item[i-1]
output.append(temp_output)
simple_stack.pop()
simple_stack.pop()
while len(simple_stack):
output.append(quiz_item[simple_stack[-1]])
simple_stack.pop()
# for i in output:
# print(i)
# 4 5 6 遠距 遠距聯想測驗.xlsx
raw = pd.read_excel('遠距聯想測驗.xlsx')
# print(raw[raw['版本'] == 1]['答案'][0])
# output = output[1:]
for item in output:
_score = [0, 0]
_record_score = [[], []]
_record_time = [[], []]
class_flag = -1
if item[0]['Msgdate'] < datetime.datetime(year=2020, month=6, day=8):
continue
s = sorted(item, key=lambda x: x['Msgdate'])
for i in range(len(item)):
print(item[i]['Msgdate'], item[i]['username'], s[i]['Msgdate'], s[i]['username'])
for ind, i in enumerate(s):
if i['quiz_class'] > 3:
if class_flag == -1:
class_flag = i['quiz_class']
if class_flag != i['quiz_class']:
print(_score[0], _score[1])
print(_record_score[0], _record_score[1])
print(_record_time[0], _record_time[1])
# ========================================================================
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter("%H:%M"))
plt.gca().xaxis.set_major_locator(mdates.HourLocator(interval=10))
plt.setp(plt.gca().xaxis.get_majorticklabels(), rotation=90)
# Plot
plt.plot(_record_time[0], _record_score[0], 'r-^')
plt.plot(_record_time[1], _record_score[1], 'g-^')
plt.show()
_score = [0, 0]
_record_score = [[], []]
_record_time = [[], []]
class_flag = -1
else:
try:
_tmp = raw[raw['版本'] == i['quiz_class']-3]['答案'].values[i['quiz_no']]
except:
print(i['Msgdate'])
break
# print(_tmp.strip())
_score[int(i['username'])%2] += int(_tmp.strip() == i['quiz_ans'])
_record_score[int(i['username'])%2].append(_score[int(i['username'])%2])
_record_time[int(i['username'])%2].append(i['Msgdate'].strftime("%H:%M:%S"))
class_flag = i['quiz_class']
# d = {
# 'name':[item['username']],
# 'time_slot':[],
# 'mode':[],
# 'type':[],
# 'quiz_number':[],
# 'action_number':[]
# }
# df.append(d)
# print(datestr)
# print(type(item))
# print(time_slot)
# print(len(time_slot))
|
[
"import pymongo\nimport pandas as pd\nimport datetime\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nDBserver = pymongo.MongoClient('mongodb://localhost:27017/')\nDB = DBserver['ir']\ncollection = DB['usermsgs']\n\nquiz_set_four = {1, 2, 4, 5}\nquiz_set_six = {1, 2, 3, 4, 5, 6}\n\nname_set = set(sorted([int(item['username']) for item in collection.find()]))\n# print(len(name_set))\n# print(name_set)\n\nquiz_item = []\nfor name in name_set:\n\n class_flag = -1\n cut_flag = 0\n temp_set = set()\n temp_item = []\n for item in collection.find({'username': f\"{str(name)}\"}):\n\n if class_flag != item['quiz_class']:\n\n if not len(quiz_set_four.symmetric_difference(temp_set)):\n # print(\"SET class flag\")\n cut_flag = 1\n elif not len(quiz_set_six.symmetric_difference(temp_set)):\n # print(\"SET class flag\")\n cut_flag = 1\n\n if item['quiz_class'] == 3 or item['quiz_class'] == 6:\n temp_set.add(item['quiz_class'])\n class_flag = item['quiz_class']\n continue\n\n if cut_flag == 1:\n # print(\"CUT\")\n temp_set.clear()\n cut_flag = 0\n # input(\"pause\")\n\n temp_set.add(item['quiz_class'])\n class_flag = item['quiz_class']\n\n temp_item.append(item)\n\n quiz_item.append(temp_item)\n # print(\"End\")\n # input(\"pause\")\n\ntime_slot = []\nfor ind, item in enumerate(quiz_item):\n if len(time_slot) == 0:\n time_slot.append([ind])\n continue\n flag=1\n for j, current_time in enumerate(time_slot):\n if quiz_item[current_time[0]][0]['Msgdate'] - datetime.timedelta(minutes=8) <= item[0]['Msgdate'] <= quiz_item[current_time[0]][0]['Msgdate'] + datetime.timedelta(minutes=8):\n time_slot[j].append(ind)\n flag = 0\n break\n if flag:\n time_slot.append([ind])\n\n\n# df = pd.DataFrame(columns=['name', 'time_slot', 'mode', 'type', 'quiz_number', 'text'])\noutput = []\nsimple_stack = []\nfor ind, item in enumerate(time_slot):\n for i in sorted(item):\n simple_stack.append(i)\n if len(simple_stack)>1:\n if simple_stack[-1]%2-1 == simple_stack[-2]%2:\n temp_output=quiz_item[i]\n temp_output+=quiz_item[i-1]\n output.append(temp_output)\n simple_stack.pop()\n simple_stack.pop()\n\n\n while len(simple_stack):\n output.append(quiz_item[simple_stack[-1]])\n simple_stack.pop()\n\n# for i in output:\n# print(i)\n\n# 4 5 6 遠距 遠距聯想測驗.xlsx\nraw = pd.read_excel('遠距聯想測驗.xlsx')\n# print(raw[raw['版本'] == 1]['答案'][0])\n# output = output[1:]\nfor item in output:\n _score = [0, 0]\n _record_score = [[], []]\n _record_time = [[], []]\n class_flag = -1\n\n if item[0]['Msgdate'] < datetime.datetime(year=2020, month=6, day=8):\n continue\n\n s = sorted(item, key=lambda x: x['Msgdate'])\n for i in range(len(item)):\n print(item[i]['Msgdate'], item[i]['username'], s[i]['Msgdate'], s[i]['username'])\n for ind, i in enumerate(s):\n if i['quiz_class'] > 3:\n if class_flag == -1:\n class_flag = i['quiz_class']\n\n if class_flag != i['quiz_class']:\n print(_score[0], _score[1])\n print(_record_score[0], _record_score[1])\n print(_record_time[0], _record_time[1])\n\n # ========================================================================\n\n plt.gca().xaxis.set_major_formatter(mdates.DateFormatter(\"%H:%M\"))\n plt.gca().xaxis.set_major_locator(mdates.HourLocator(interval=10))\n plt.setp(plt.gca().xaxis.get_majorticklabels(), rotation=90)\n # Plot\n plt.plot(_record_time[0], _record_score[0], 'r-^')\n plt.plot(_record_time[1], _record_score[1], 'g-^')\n plt.show()\n\n _score = [0, 0]\n _record_score = [[], []]\n _record_time = [[], []]\n class_flag = -1\n else:\n try:\n _tmp = raw[raw['版本'] == 
i['quiz_class']-3]['答案'].values[i['quiz_no']]\n except:\n print(i['Msgdate'])\n break\n # print(_tmp.strip())\n _score[int(i['username'])%2] += int(_tmp.strip() == i['quiz_ans'])\n _record_score[int(i['username'])%2].append(_score[int(i['username'])%2])\n _record_time[int(i['username'])%2].append(i['Msgdate'].strftime(\"%H:%M:%S\"))\n class_flag = i['quiz_class']\n # d = {\n # 'name':[item['username']],\n # 'time_slot':[],\n # 'mode':[],\n # 'type':[],\n # 'quiz_number':[],\n # 'action_number':[]\n # }\n # df.append(d)\n\n # print(datestr)\n\n # print(type(item))\n# print(time_slot)\n# print(len(time_slot))\n\n\n\n\n\n",
"import pymongo\nimport pandas as pd\nimport datetime\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nDBserver = pymongo.MongoClient('mongodb://localhost:27017/')\nDB = DBserver['ir']\ncollection = DB['usermsgs']\nquiz_set_four = {1, 2, 4, 5}\nquiz_set_six = {1, 2, 3, 4, 5, 6}\nname_set = set(sorted([int(item['username']) for item in collection.find()]))\nquiz_item = []\nfor name in name_set:\n class_flag = -1\n cut_flag = 0\n temp_set = set()\n temp_item = []\n for item in collection.find({'username': f'{str(name)}'}):\n if class_flag != item['quiz_class']:\n if not len(quiz_set_four.symmetric_difference(temp_set)):\n cut_flag = 1\n elif not len(quiz_set_six.symmetric_difference(temp_set)):\n cut_flag = 1\n if item['quiz_class'] == 3 or item['quiz_class'] == 6:\n temp_set.add(item['quiz_class'])\n class_flag = item['quiz_class']\n continue\n if cut_flag == 1:\n temp_set.clear()\n cut_flag = 0\n temp_set.add(item['quiz_class'])\n class_flag = item['quiz_class']\n temp_item.append(item)\n quiz_item.append(temp_item)\ntime_slot = []\nfor ind, item in enumerate(quiz_item):\n if len(time_slot) == 0:\n time_slot.append([ind])\n continue\n flag = 1\n for j, current_time in enumerate(time_slot):\n if quiz_item[current_time[0]][0]['Msgdate'] - datetime.timedelta(\n minutes=8) <= item[0]['Msgdate'] <= quiz_item[current_time[0]][0][\n 'Msgdate'] + datetime.timedelta(minutes=8):\n time_slot[j].append(ind)\n flag = 0\n break\n if flag:\n time_slot.append([ind])\noutput = []\nsimple_stack = []\nfor ind, item in enumerate(time_slot):\n for i in sorted(item):\n simple_stack.append(i)\n if len(simple_stack) > 1:\n if simple_stack[-1] % 2 - 1 == simple_stack[-2] % 2:\n temp_output = quiz_item[i]\n temp_output += quiz_item[i - 1]\n output.append(temp_output)\n simple_stack.pop()\n simple_stack.pop()\n while len(simple_stack):\n output.append(quiz_item[simple_stack[-1]])\n simple_stack.pop()\nraw = pd.read_excel('遠距聯想測驗.xlsx')\nfor item in output:\n _score = [0, 0]\n _record_score = [[], []]\n _record_time = [[], []]\n class_flag = -1\n if item[0]['Msgdate'] < datetime.datetime(year=2020, month=6, day=8):\n continue\n s = sorted(item, key=lambda x: x['Msgdate'])\n for i in range(len(item)):\n print(item[i]['Msgdate'], item[i]['username'], s[i]['Msgdate'], s[i\n ]['username'])\n for ind, i in enumerate(s):\n if i['quiz_class'] > 3:\n if class_flag == -1:\n class_flag = i['quiz_class']\n if class_flag != i['quiz_class']:\n print(_score[0], _score[1])\n print(_record_score[0], _record_score[1])\n print(_record_time[0], _record_time[1])\n plt.gca().xaxis.set_major_formatter(mdates.DateFormatter(\n '%H:%M'))\n plt.gca().xaxis.set_major_locator(mdates.HourLocator(\n interval=10))\n plt.setp(plt.gca().xaxis.get_majorticklabels(), rotation=90)\n plt.plot(_record_time[0], _record_score[0], 'r-^')\n plt.plot(_record_time[1], _record_score[1], 'g-^')\n plt.show()\n _score = [0, 0]\n _record_score = [[], []]\n _record_time = [[], []]\n class_flag = -1\n else:\n try:\n _tmp = raw[raw['版本'] == i['quiz_class'] - 3]['答案'].values[i\n ['quiz_no']]\n except:\n print(i['Msgdate'])\n break\n _score[int(i['username']) % 2] += int(_tmp.strip() == i[\n 'quiz_ans'])\n _record_score[int(i['username']) % 2].append(_score[int(i[\n 'username']) % 2])\n _record_time[int(i['username']) % 2].append(i['Msgdate'].\n strftime('%H:%M:%S'))\n class_flag = i['quiz_class']\n",
"<import token>\nDBserver = pymongo.MongoClient('mongodb://localhost:27017/')\nDB = DBserver['ir']\ncollection = DB['usermsgs']\nquiz_set_four = {1, 2, 4, 5}\nquiz_set_six = {1, 2, 3, 4, 5, 6}\nname_set = set(sorted([int(item['username']) for item in collection.find()]))\nquiz_item = []\nfor name in name_set:\n class_flag = -1\n cut_flag = 0\n temp_set = set()\n temp_item = []\n for item in collection.find({'username': f'{str(name)}'}):\n if class_flag != item['quiz_class']:\n if not len(quiz_set_four.symmetric_difference(temp_set)):\n cut_flag = 1\n elif not len(quiz_set_six.symmetric_difference(temp_set)):\n cut_flag = 1\n if item['quiz_class'] == 3 or item['quiz_class'] == 6:\n temp_set.add(item['quiz_class'])\n class_flag = item['quiz_class']\n continue\n if cut_flag == 1:\n temp_set.clear()\n cut_flag = 0\n temp_set.add(item['quiz_class'])\n class_flag = item['quiz_class']\n temp_item.append(item)\n quiz_item.append(temp_item)\ntime_slot = []\nfor ind, item in enumerate(quiz_item):\n if len(time_slot) == 0:\n time_slot.append([ind])\n continue\n flag = 1\n for j, current_time in enumerate(time_slot):\n if quiz_item[current_time[0]][0]['Msgdate'] - datetime.timedelta(\n minutes=8) <= item[0]['Msgdate'] <= quiz_item[current_time[0]][0][\n 'Msgdate'] + datetime.timedelta(minutes=8):\n time_slot[j].append(ind)\n flag = 0\n break\n if flag:\n time_slot.append([ind])\noutput = []\nsimple_stack = []\nfor ind, item in enumerate(time_slot):\n for i in sorted(item):\n simple_stack.append(i)\n if len(simple_stack) > 1:\n if simple_stack[-1] % 2 - 1 == simple_stack[-2] % 2:\n temp_output = quiz_item[i]\n temp_output += quiz_item[i - 1]\n output.append(temp_output)\n simple_stack.pop()\n simple_stack.pop()\n while len(simple_stack):\n output.append(quiz_item[simple_stack[-1]])\n simple_stack.pop()\nraw = pd.read_excel('遠距聯想測驗.xlsx')\nfor item in output:\n _score = [0, 0]\n _record_score = [[], []]\n _record_time = [[], []]\n class_flag = -1\n if item[0]['Msgdate'] < datetime.datetime(year=2020, month=6, day=8):\n continue\n s = sorted(item, key=lambda x: x['Msgdate'])\n for i in range(len(item)):\n print(item[i]['Msgdate'], item[i]['username'], s[i]['Msgdate'], s[i\n ]['username'])\n for ind, i in enumerate(s):\n if i['quiz_class'] > 3:\n if class_flag == -1:\n class_flag = i['quiz_class']\n if class_flag != i['quiz_class']:\n print(_score[0], _score[1])\n print(_record_score[0], _record_score[1])\n print(_record_time[0], _record_time[1])\n plt.gca().xaxis.set_major_formatter(mdates.DateFormatter(\n '%H:%M'))\n plt.gca().xaxis.set_major_locator(mdates.HourLocator(\n interval=10))\n plt.setp(plt.gca().xaxis.get_majorticklabels(), rotation=90)\n plt.plot(_record_time[0], _record_score[0], 'r-^')\n plt.plot(_record_time[1], _record_score[1], 'g-^')\n plt.show()\n _score = [0, 0]\n _record_score = [[], []]\n _record_time = [[], []]\n class_flag = -1\n else:\n try:\n _tmp = raw[raw['版本'] == i['quiz_class'] - 3]['答案'].values[i\n ['quiz_no']]\n except:\n print(i['Msgdate'])\n break\n _score[int(i['username']) % 2] += int(_tmp.strip() == i[\n 'quiz_ans'])\n _record_score[int(i['username']) % 2].append(_score[int(i[\n 'username']) % 2])\n _record_time[int(i['username']) % 2].append(i['Msgdate'].\n strftime('%H:%M:%S'))\n class_flag = i['quiz_class']\n",
"<import token>\n<assignment token>\nfor name in name_set:\n class_flag = -1\n cut_flag = 0\n temp_set = set()\n temp_item = []\n for item in collection.find({'username': f'{str(name)}'}):\n if class_flag != item['quiz_class']:\n if not len(quiz_set_four.symmetric_difference(temp_set)):\n cut_flag = 1\n elif not len(quiz_set_six.symmetric_difference(temp_set)):\n cut_flag = 1\n if item['quiz_class'] == 3 or item['quiz_class'] == 6:\n temp_set.add(item['quiz_class'])\n class_flag = item['quiz_class']\n continue\n if cut_flag == 1:\n temp_set.clear()\n cut_flag = 0\n temp_set.add(item['quiz_class'])\n class_flag = item['quiz_class']\n temp_item.append(item)\n quiz_item.append(temp_item)\n<assignment token>\nfor ind, item in enumerate(quiz_item):\n if len(time_slot) == 0:\n time_slot.append([ind])\n continue\n flag = 1\n for j, current_time in enumerate(time_slot):\n if quiz_item[current_time[0]][0]['Msgdate'] - datetime.timedelta(\n minutes=8) <= item[0]['Msgdate'] <= quiz_item[current_time[0]][0][\n 'Msgdate'] + datetime.timedelta(minutes=8):\n time_slot[j].append(ind)\n flag = 0\n break\n if flag:\n time_slot.append([ind])\n<assignment token>\nfor ind, item in enumerate(time_slot):\n for i in sorted(item):\n simple_stack.append(i)\n if len(simple_stack) > 1:\n if simple_stack[-1] % 2 - 1 == simple_stack[-2] % 2:\n temp_output = quiz_item[i]\n temp_output += quiz_item[i - 1]\n output.append(temp_output)\n simple_stack.pop()\n simple_stack.pop()\n while len(simple_stack):\n output.append(quiz_item[simple_stack[-1]])\n simple_stack.pop()\n<assignment token>\nfor item in output:\n _score = [0, 0]\n _record_score = [[], []]\n _record_time = [[], []]\n class_flag = -1\n if item[0]['Msgdate'] < datetime.datetime(year=2020, month=6, day=8):\n continue\n s = sorted(item, key=lambda x: x['Msgdate'])\n for i in range(len(item)):\n print(item[i]['Msgdate'], item[i]['username'], s[i]['Msgdate'], s[i\n ]['username'])\n for ind, i in enumerate(s):\n if i['quiz_class'] > 3:\n if class_flag == -1:\n class_flag = i['quiz_class']\n if class_flag != i['quiz_class']:\n print(_score[0], _score[1])\n print(_record_score[0], _record_score[1])\n print(_record_time[0], _record_time[1])\n plt.gca().xaxis.set_major_formatter(mdates.DateFormatter(\n '%H:%M'))\n plt.gca().xaxis.set_major_locator(mdates.HourLocator(\n interval=10))\n plt.setp(plt.gca().xaxis.get_majorticklabels(), rotation=90)\n plt.plot(_record_time[0], _record_score[0], 'r-^')\n plt.plot(_record_time[1], _record_score[1], 'g-^')\n plt.show()\n _score = [0, 0]\n _record_score = [[], []]\n _record_time = [[], []]\n class_flag = -1\n else:\n try:\n _tmp = raw[raw['版本'] == i['quiz_class'] - 3]['答案'].values[i\n ['quiz_no']]\n except:\n print(i['Msgdate'])\n break\n _score[int(i['username']) % 2] += int(_tmp.strip() == i[\n 'quiz_ans'])\n _record_score[int(i['username']) % 2].append(_score[int(i[\n 'username']) % 2])\n _record_time[int(i['username']) % 2].append(i['Msgdate'].\n strftime('%H:%M:%S'))\n class_flag = i['quiz_class']\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,684 |
6cf0c3a7a91701b39e66b48d9ee365cac8040020
|
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from qgis.utils import *
##CEREMA=group
##reseau=vector line
##prefixe=optional string
##sens= field reseau
##fichier_noeuds=output vector
layer=processing.getObject(reseau)
nom_champs=[]
for i in layer.dataProvider().fields():
nom_champs.append(i.name())
if ("i" not in nom_champs):
layer.dataProvider().addAttributes([QgsField("i",QVariant.String,len=15)])
if ("j" not in nom_champs):
layer.dataProvider().addAttributes([QgsField("j",QVariant.String,len=15)])
if ("ij" not in nom_champs):
layer.dataProvider().addAttributes([QgsField("ij",QVariant.String,len=31)])
layer.updateFields()
layer.commitChanges()
ida=layer.fieldNameIndex("i")
idb=layer.fieldNameIndex("j")
idij=layer.fieldNameIndex("ij")
lines=layer.getFeatures()
noeuds={}
nom_fichier=fichier_noeuds
champs=QgsFields()
champs.append(QgsField("num",QVariant.String,len=35))
champs.append(QgsField("nb",QVariant.Int))
table_noeuds=QgsVectorFileWriter(nom_fichier,"UTF-8",champs,QGis.WKBPoint,layer.crs(),"ESRI Shapefile")
src=QgsCoordinateReferenceSystem(layer.crs())
dest=QgsCoordinateReferenceSystem(4326)
xtr=QgsCoordinateTransform(src,dest)
for ligne in lines:
gligne=ligne.geometry()
if ligne[sens]=='1':
if gligne.wkbType()==QGis.WKBMultiLineString:
g=gligne.asMultiPolyline()
na=g[0][0]
liba=str(int(xtr.transform(na)[0]*1e6+180*1e6)).zfill(9)+str(int(xtr.transform(na)[1]*1e6+180*1e6)).zfill(9)
nb=g[-1][-1]
libb=str(int(xtr.transform(nb)[0]*1e6+180*1e6)).zfill(9)+str(int(xtr.transform(nb)[1]*1e6+180*1e6)).zfill(9)
elif gligne.wkbType()==QGis.WKBLineString:
g=gligne.asPolyline()
na=g[0]
liba=str(int(xtr.transform(na)[0]*1e6+180*1e6)).zfill(9)+str(int(xtr.transform(na)[1]*1e6+180*1e6)).zfill(9)
nb=g[-1]
libb=str(int(xtr.transform(nb)[0]*1e6+180*1e6)).zfill(9)+str(int(xtr.transform(nb)[1]*1e6+180*1e6)).zfill(9)
if (na not in noeuds):
noeuds[na]=(prefixe+liba,1)
else:
noeuds[na]=(prefixe+liba,noeuds[na][1]+1)
if (nb not in noeuds):
noeuds[nb]=(prefixe+libb,1)
else:
noeuds[nb]=(prefixe+libb,noeuds[nb][1]+1)
#outs=open("c:/temp/noeuds.txt","w")
for i,n in enumerate(noeuds):
node=QgsFeature()
node.setGeometry(QgsGeometry.fromPoint(QgsPoint(n[0],n[1])))
#node.setAttributes([noeuds[n]])
node.setAttributes([noeuds[n][0],noeuds[n][1]])
table_noeuds.addFeature(node)
#outs.write(str(n)+";"+str(noeuds[n])+"\n")
del table_noeuds
#outs.close()
lines=layer.getFeatures()
layer.startEditing()
layer.beginEditCommand(QCoreApplication.translate("Building graph","Building graph"))
for ligne in lines:
if ligne[sens]==1:
gligne=ligne.geometry()
if gligne.wkbType()==QGis.WKBMultiLineString:
g=gligne.asMultiPolyline()
na=g[0][0]
nb=g[-1][-1]
liba=str(int(xtr.transform(na)[0]*1e6+180*1e6)).zfill(9)+str(int(xtr.transform(na)[1]*1e6+180*1e6)).zfill(9)
libb=str(int(xtr.transform(nb)[0]*1e6+180*1e6)).zfill(9)+str(int(xtr.transform(nb)[1]*1e6+180*1e6)).zfill(9)
elif gligne.wkbType()==QGis.WKBLineString:
g=gligne.asPolyline()
na=g[0]
nb=g[-1]
liba=str(int(xtr.transform(na)[0]*1e6+180*1e6)).zfill(9)+str(int(xtr.transform(na)[1]*1e6+180*1e6)).zfill(9)
libb=str(int(xtr.transform(nb)[0]*1e6+180*1e6)).zfill(9)+str(int(xtr.transform(nb)[1]*1e6+180*1e6)).zfill(9)
id=ligne.id()
#valid={ida : noeuds[na], idb: noeuds[nb]}
        # noeuds[...] holds a (label, count) tuple; only the label string goes into the attribute
        layer.changeAttributeValue(id,ida, noeuds[na][0])
        layer.changeAttributeValue(id,idb, noeuds[nb][0])
        layer.changeAttributeValue(id,idij, noeuds[na][0]+"-"+noeuds[nb][0])
layer.endEditCommand()
layer.commitChanges()
|
[
"from PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom qgis.core import *\nfrom qgis.utils import *\n\n##CEREMA=group\n##reseau=vector line\n##prefixe=optional string\n##sens= field reseau\n##fichier_noeuds=output vector\n\n\n\nlayer=processing.getObject(reseau)\nnom_champs=[]\nfor i in layer.dataProvider().fields():\n nom_champs.append(i.name())\nif (\"i\" not in nom_champs):\n layer.dataProvider().addAttributes([QgsField(\"i\",QVariant.String,len=15)])\nif (\"j\" not in nom_champs):\n layer.dataProvider().addAttributes([QgsField(\"j\",QVariant.String,len=15)])\nif (\"ij\" not in nom_champs):\n layer.dataProvider().addAttributes([QgsField(\"ij\",QVariant.String,len=31)])\nlayer.updateFields()\nlayer.commitChanges()\nida=layer.fieldNameIndex(\"i\")\nidb=layer.fieldNameIndex(\"j\")\nidij=layer.fieldNameIndex(\"ij\")\nlines=layer.getFeatures()\nnoeuds={}\nnom_fichier=fichier_noeuds\nchamps=QgsFields()\nchamps.append(QgsField(\"num\",QVariant.String,len=35))\nchamps.append(QgsField(\"nb\",QVariant.Int))\ntable_noeuds=QgsVectorFileWriter(nom_fichier,\"UTF-8\",champs,QGis.WKBPoint,layer.crs(),\"ESRI Shapefile\")\nsrc=QgsCoordinateReferenceSystem(layer.crs())\ndest=QgsCoordinateReferenceSystem(4326)\nxtr=QgsCoordinateTransform(src,dest)\nfor ligne in lines:\n gligne=ligne.geometry()\n if ligne[sens]=='1':\n if gligne.wkbType()==QGis.WKBMultiLineString:\n g=gligne.asMultiPolyline()\n na=g[0][0]\n liba=str(int(xtr.transform(na)[0]*1e6+180*1e6)).zfill(9)+str(int(xtr.transform(na)[1]*1e6+180*1e6)).zfill(9)\n nb=g[-1][-1]\n libb=str(int(xtr.transform(nb)[0]*1e6+180*1e6)).zfill(9)+str(int(xtr.transform(nb)[1]*1e6+180*1e6)).zfill(9)\n \n elif gligne.wkbType()==QGis.WKBLineString:\n g=gligne.asPolyline()\n na=g[0]\n liba=str(int(xtr.transform(na)[0]*1e6+180*1e6)).zfill(9)+str(int(xtr.transform(na)[1]*1e6+180*1e6)).zfill(9)\n nb=g[-1]\n libb=str(int(xtr.transform(nb)[0]*1e6+180*1e6)).zfill(9)+str(int(xtr.transform(nb)[1]*1e6+180*1e6)).zfill(9)\n if (na not in noeuds):\n noeuds[na]=(prefixe+liba,1)\n else:\n noeuds[na]=(prefixe+liba,noeuds[na][1]+1)\n if (nb not in noeuds):\n noeuds[nb]=(prefixe+libb,1)\n else:\n noeuds[nb]=(prefixe+libb,noeuds[nb][1]+1)\n#outs=open(\"c:/temp/noeuds.txt\",\"w\")\nfor i,n in enumerate(noeuds):\n node=QgsFeature()\n node.setGeometry(QgsGeometry.fromPoint(QgsPoint(n[0],n[1])))\n #node.setAttributes([noeuds[n]])\n node.setAttributes([noeuds[n][0],noeuds[n][1]])\n table_noeuds.addFeature(node)\n#outs.write(str(n)+\";\"+str(noeuds[n])+\"\\n\")\ndel table_noeuds\n#outs.close()\nlines=layer.getFeatures()\nlayer.startEditing()\nlayer.beginEditCommand(QCoreApplication.translate(\"Building graph\",\"Building graph\"))\nfor ligne in lines:\n if ligne[sens]==1:\n gligne=ligne.geometry()\n if gligne.wkbType()==QGis.WKBMultiLineString:\n \n g=gligne.asMultiPolyline()\n\n na=g[0][0]\n nb=g[-1][-1]\n liba=str(int(xtr.transform(na)[0]*1e6+180*1e6)).zfill(9)+str(int(xtr.transform(na)[1]*1e6+180*1e6)).zfill(9)\n libb=str(int(xtr.transform(nb)[0]*1e6+180*1e6)).zfill(9)+str(int(xtr.transform(nb)[1]*1e6+180*1e6)).zfill(9)\n elif gligne.wkbType()==QGis.WKBLineString:\n\n g=gligne.asPolyline()\n na=g[0]\n nb=g[-1]\n liba=str(int(xtr.transform(na)[0]*1e6+180*1e6)).zfill(9)+str(int(xtr.transform(na)[1]*1e6+180*1e6)).zfill(9)\n\n libb=str(int(xtr.transform(nb)[0]*1e6+180*1e6)).zfill(9)+str(int(xtr.transform(nb)[1]*1e6+180*1e6)).zfill(9)\n\n\n id=ligne.id()\n #valid={ida : noeuds[na], idb: noeuds[nb]}\n\n layer.changeAttributeValue(id,ida, noeuds[na])\n layer.changeAttributeValue(id,idb, 
noeuds[nb])\n layer.changeAttributeValue(id,idij, noeuds[na]+\"-\"+noeuds[nb])\n\nlayer.endEditCommand()\nlayer.commitChanges()\n",
"from PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom qgis.core import *\nfrom qgis.utils import *\nlayer = processing.getObject(reseau)\nnom_champs = []\nfor i in layer.dataProvider().fields():\n nom_champs.append(i.name())\nif 'i' not in nom_champs:\n layer.dataProvider().addAttributes([QgsField('i', QVariant.String, len=15)]\n )\nif 'j' not in nom_champs:\n layer.dataProvider().addAttributes([QgsField('j', QVariant.String, len=15)]\n )\nif 'ij' not in nom_champs:\n layer.dataProvider().addAttributes([QgsField('ij', QVariant.String, len\n =31)])\nlayer.updateFields()\nlayer.commitChanges()\nida = layer.fieldNameIndex('i')\nidb = layer.fieldNameIndex('j')\nidij = layer.fieldNameIndex('ij')\nlines = layer.getFeatures()\nnoeuds = {}\nnom_fichier = fichier_noeuds\nchamps = QgsFields()\nchamps.append(QgsField('num', QVariant.String, len=35))\nchamps.append(QgsField('nb', QVariant.Int))\ntable_noeuds = QgsVectorFileWriter(nom_fichier, 'UTF-8', champs, QGis.\n WKBPoint, layer.crs(), 'ESRI Shapefile')\nsrc = QgsCoordinateReferenceSystem(layer.crs())\ndest = QgsCoordinateReferenceSystem(4326)\nxtr = QgsCoordinateTransform(src, dest)\nfor ligne in lines:\n gligne = ligne.geometry()\n if ligne[sens] == '1':\n if gligne.wkbType() == QGis.WKBMultiLineString:\n g = gligne.asMultiPolyline()\n na = g[0][0]\n liba = str(int(xtr.transform(na)[0] * 1000000.0 + 180 * 1000000.0)\n ).zfill(9) + str(int(xtr.transform(na)[1] * 1000000.0 + 180 *\n 1000000.0)).zfill(9)\n nb = g[-1][-1]\n libb = str(int(xtr.transform(nb)[0] * 1000000.0 + 180 * 1000000.0)\n ).zfill(9) + str(int(xtr.transform(nb)[1] * 1000000.0 + 180 *\n 1000000.0)).zfill(9)\n elif gligne.wkbType() == QGis.WKBLineString:\n g = gligne.asPolyline()\n na = g[0]\n liba = str(int(xtr.transform(na)[0] * 1000000.0 + 180 * 1000000.0)\n ).zfill(9) + str(int(xtr.transform(na)[1] * 1000000.0 + 180 *\n 1000000.0)).zfill(9)\n nb = g[-1]\n libb = str(int(xtr.transform(nb)[0] * 1000000.0 + 180 * 1000000.0)\n ).zfill(9) + str(int(xtr.transform(nb)[1] * 1000000.0 + 180 *\n 1000000.0)).zfill(9)\n if na not in noeuds:\n noeuds[na] = prefixe + liba, 1\n else:\n noeuds[na] = prefixe + liba, noeuds[na][1] + 1\n if nb not in noeuds:\n noeuds[nb] = prefixe + libb, 1\n else:\n noeuds[nb] = prefixe + libb, noeuds[nb][1] + 1\nfor i, n in enumerate(noeuds):\n node = QgsFeature()\n node.setGeometry(QgsGeometry.fromPoint(QgsPoint(n[0], n[1])))\n node.setAttributes([noeuds[n][0], noeuds[n][1]])\n table_noeuds.addFeature(node)\ndel table_noeuds\nlines = layer.getFeatures()\nlayer.startEditing()\nlayer.beginEditCommand(QCoreApplication.translate('Building graph',\n 'Building graph'))\nfor ligne in lines:\n if ligne[sens] == 1:\n gligne = ligne.geometry()\n if gligne.wkbType() == QGis.WKBMultiLineString:\n g = gligne.asMultiPolyline()\n na = g[0][0]\n nb = g[-1][-1]\n liba = str(int(xtr.transform(na)[0] * 1000000.0 + 180 * 1000000.0)\n ).zfill(9) + str(int(xtr.transform(na)[1] * 1000000.0 + 180 *\n 1000000.0)).zfill(9)\n libb = str(int(xtr.transform(nb)[0] * 1000000.0 + 180 * 1000000.0)\n ).zfill(9) + str(int(xtr.transform(nb)[1] * 1000000.0 + 180 *\n 1000000.0)).zfill(9)\n elif gligne.wkbType() == QGis.WKBLineString:\n g = gligne.asPolyline()\n na = g[0]\n nb = g[-1]\n liba = str(int(xtr.transform(na)[0] * 1000000.0 + 180 * 1000000.0)\n ).zfill(9) + str(int(xtr.transform(na)[1] * 1000000.0 + 180 *\n 1000000.0)).zfill(9)\n libb = str(int(xtr.transform(nb)[0] * 1000000.0 + 180 * 1000000.0)\n ).zfill(9) + str(int(xtr.transform(nb)[1] * 1000000.0 + 180 *\n 1000000.0)).zfill(9)\n 
id = ligne.id()\n layer.changeAttributeValue(id, ida, noeuds[na])\n layer.changeAttributeValue(id, idb, noeuds[nb])\n layer.changeAttributeValue(id, idij, noeuds[na] + '-' + noeuds[nb])\nlayer.endEditCommand()\nlayer.commitChanges()\n",
"<import token>\nlayer = processing.getObject(reseau)\nnom_champs = []\nfor i in layer.dataProvider().fields():\n nom_champs.append(i.name())\nif 'i' not in nom_champs:\n layer.dataProvider().addAttributes([QgsField('i', QVariant.String, len=15)]\n )\nif 'j' not in nom_champs:\n layer.dataProvider().addAttributes([QgsField('j', QVariant.String, len=15)]\n )\nif 'ij' not in nom_champs:\n layer.dataProvider().addAttributes([QgsField('ij', QVariant.String, len\n =31)])\nlayer.updateFields()\nlayer.commitChanges()\nida = layer.fieldNameIndex('i')\nidb = layer.fieldNameIndex('j')\nidij = layer.fieldNameIndex('ij')\nlines = layer.getFeatures()\nnoeuds = {}\nnom_fichier = fichier_noeuds\nchamps = QgsFields()\nchamps.append(QgsField('num', QVariant.String, len=35))\nchamps.append(QgsField('nb', QVariant.Int))\ntable_noeuds = QgsVectorFileWriter(nom_fichier, 'UTF-8', champs, QGis.\n WKBPoint, layer.crs(), 'ESRI Shapefile')\nsrc = QgsCoordinateReferenceSystem(layer.crs())\ndest = QgsCoordinateReferenceSystem(4326)\nxtr = QgsCoordinateTransform(src, dest)\nfor ligne in lines:\n gligne = ligne.geometry()\n if ligne[sens] == '1':\n if gligne.wkbType() == QGis.WKBMultiLineString:\n g = gligne.asMultiPolyline()\n na = g[0][0]\n liba = str(int(xtr.transform(na)[0] * 1000000.0 + 180 * 1000000.0)\n ).zfill(9) + str(int(xtr.transform(na)[1] * 1000000.0 + 180 *\n 1000000.0)).zfill(9)\n nb = g[-1][-1]\n libb = str(int(xtr.transform(nb)[0] * 1000000.0 + 180 * 1000000.0)\n ).zfill(9) + str(int(xtr.transform(nb)[1] * 1000000.0 + 180 *\n 1000000.0)).zfill(9)\n elif gligne.wkbType() == QGis.WKBLineString:\n g = gligne.asPolyline()\n na = g[0]\n liba = str(int(xtr.transform(na)[0] * 1000000.0 + 180 * 1000000.0)\n ).zfill(9) + str(int(xtr.transform(na)[1] * 1000000.0 + 180 *\n 1000000.0)).zfill(9)\n nb = g[-1]\n libb = str(int(xtr.transform(nb)[0] * 1000000.0 + 180 * 1000000.0)\n ).zfill(9) + str(int(xtr.transform(nb)[1] * 1000000.0 + 180 *\n 1000000.0)).zfill(9)\n if na not in noeuds:\n noeuds[na] = prefixe + liba, 1\n else:\n noeuds[na] = prefixe + liba, noeuds[na][1] + 1\n if nb not in noeuds:\n noeuds[nb] = prefixe + libb, 1\n else:\n noeuds[nb] = prefixe + libb, noeuds[nb][1] + 1\nfor i, n in enumerate(noeuds):\n node = QgsFeature()\n node.setGeometry(QgsGeometry.fromPoint(QgsPoint(n[0], n[1])))\n node.setAttributes([noeuds[n][0], noeuds[n][1]])\n table_noeuds.addFeature(node)\ndel table_noeuds\nlines = layer.getFeatures()\nlayer.startEditing()\nlayer.beginEditCommand(QCoreApplication.translate('Building graph',\n 'Building graph'))\nfor ligne in lines:\n if ligne[sens] == 1:\n gligne = ligne.geometry()\n if gligne.wkbType() == QGis.WKBMultiLineString:\n g = gligne.asMultiPolyline()\n na = g[0][0]\n nb = g[-1][-1]\n liba = str(int(xtr.transform(na)[0] * 1000000.0 + 180 * 1000000.0)\n ).zfill(9) + str(int(xtr.transform(na)[1] * 1000000.0 + 180 *\n 1000000.0)).zfill(9)\n libb = str(int(xtr.transform(nb)[0] * 1000000.0 + 180 * 1000000.0)\n ).zfill(9) + str(int(xtr.transform(nb)[1] * 1000000.0 + 180 *\n 1000000.0)).zfill(9)\n elif gligne.wkbType() == QGis.WKBLineString:\n g = gligne.asPolyline()\n na = g[0]\n nb = g[-1]\n liba = str(int(xtr.transform(na)[0] * 1000000.0 + 180 * 1000000.0)\n ).zfill(9) + str(int(xtr.transform(na)[1] * 1000000.0 + 180 *\n 1000000.0)).zfill(9)\n libb = str(int(xtr.transform(nb)[0] * 1000000.0 + 180 * 1000000.0)\n ).zfill(9) + str(int(xtr.transform(nb)[1] * 1000000.0 + 180 *\n 1000000.0)).zfill(9)\n id = ligne.id()\n layer.changeAttributeValue(id, ida, noeuds[na])\n 
layer.changeAttributeValue(id, idb, noeuds[nb])\n layer.changeAttributeValue(id, idij, noeuds[na] + '-' + noeuds[nb])\nlayer.endEditCommand()\nlayer.commitChanges()\n",
"<import token>\n<assignment token>\nfor i in layer.dataProvider().fields():\n nom_champs.append(i.name())\nif 'i' not in nom_champs:\n layer.dataProvider().addAttributes([QgsField('i', QVariant.String, len=15)]\n )\nif 'j' not in nom_champs:\n layer.dataProvider().addAttributes([QgsField('j', QVariant.String, len=15)]\n )\nif 'ij' not in nom_champs:\n layer.dataProvider().addAttributes([QgsField('ij', QVariant.String, len\n =31)])\nlayer.updateFields()\nlayer.commitChanges()\n<assignment token>\nchamps.append(QgsField('num', QVariant.String, len=35))\nchamps.append(QgsField('nb', QVariant.Int))\n<assignment token>\nfor ligne in lines:\n gligne = ligne.geometry()\n if ligne[sens] == '1':\n if gligne.wkbType() == QGis.WKBMultiLineString:\n g = gligne.asMultiPolyline()\n na = g[0][0]\n liba = str(int(xtr.transform(na)[0] * 1000000.0 + 180 * 1000000.0)\n ).zfill(9) + str(int(xtr.transform(na)[1] * 1000000.0 + 180 *\n 1000000.0)).zfill(9)\n nb = g[-1][-1]\n libb = str(int(xtr.transform(nb)[0] * 1000000.0 + 180 * 1000000.0)\n ).zfill(9) + str(int(xtr.transform(nb)[1] * 1000000.0 + 180 *\n 1000000.0)).zfill(9)\n elif gligne.wkbType() == QGis.WKBLineString:\n g = gligne.asPolyline()\n na = g[0]\n liba = str(int(xtr.transform(na)[0] * 1000000.0 + 180 * 1000000.0)\n ).zfill(9) + str(int(xtr.transform(na)[1] * 1000000.0 + 180 *\n 1000000.0)).zfill(9)\n nb = g[-1]\n libb = str(int(xtr.transform(nb)[0] * 1000000.0 + 180 * 1000000.0)\n ).zfill(9) + str(int(xtr.transform(nb)[1] * 1000000.0 + 180 *\n 1000000.0)).zfill(9)\n if na not in noeuds:\n noeuds[na] = prefixe + liba, 1\n else:\n noeuds[na] = prefixe + liba, noeuds[na][1] + 1\n if nb not in noeuds:\n noeuds[nb] = prefixe + libb, 1\n else:\n noeuds[nb] = prefixe + libb, noeuds[nb][1] + 1\nfor i, n in enumerate(noeuds):\n node = QgsFeature()\n node.setGeometry(QgsGeometry.fromPoint(QgsPoint(n[0], n[1])))\n node.setAttributes([noeuds[n][0], noeuds[n][1]])\n table_noeuds.addFeature(node)\ndel table_noeuds\n<assignment token>\nlayer.startEditing()\nlayer.beginEditCommand(QCoreApplication.translate('Building graph',\n 'Building graph'))\nfor ligne in lines:\n if ligne[sens] == 1:\n gligne = ligne.geometry()\n if gligne.wkbType() == QGis.WKBMultiLineString:\n g = gligne.asMultiPolyline()\n na = g[0][0]\n nb = g[-1][-1]\n liba = str(int(xtr.transform(na)[0] * 1000000.0 + 180 * 1000000.0)\n ).zfill(9) + str(int(xtr.transform(na)[1] * 1000000.0 + 180 *\n 1000000.0)).zfill(9)\n libb = str(int(xtr.transform(nb)[0] * 1000000.0 + 180 * 1000000.0)\n ).zfill(9) + str(int(xtr.transform(nb)[1] * 1000000.0 + 180 *\n 1000000.0)).zfill(9)\n elif gligne.wkbType() == QGis.WKBLineString:\n g = gligne.asPolyline()\n na = g[0]\n nb = g[-1]\n liba = str(int(xtr.transform(na)[0] * 1000000.0 + 180 * 1000000.0)\n ).zfill(9) + str(int(xtr.transform(na)[1] * 1000000.0 + 180 *\n 1000000.0)).zfill(9)\n libb = str(int(xtr.transform(nb)[0] * 1000000.0 + 180 * 1000000.0)\n ).zfill(9) + str(int(xtr.transform(nb)[1] * 1000000.0 + 180 *\n 1000000.0)).zfill(9)\n id = ligne.id()\n layer.changeAttributeValue(id, ida, noeuds[na])\n layer.changeAttributeValue(id, idb, noeuds[nb])\n layer.changeAttributeValue(id, idij, noeuds[na] + '-' + noeuds[nb])\nlayer.endEditCommand()\nlayer.commitChanges()\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,685 |
38a154418f3a3df2ed45b720f3e0008f2d9deb4e
|
from card_test_remove_keys import *
|
[
"from card_test_remove_keys import *\n",
"<import token>\n"
] | false |
98,686 |
ac329c3ba09366e45ce87fda8115a5cad4fba92a
|
import pygame
from pygame import *
SCREEN_SIZE = pygame.Rect((0, 0, 800, 640))
class Dialog():
def __init__(self, pos, surface):
self.dialogues = dict()
self.image = Surface((SCREEN_SIZE.width, SCREEN_SIZE.height))
self.image.fill(Color("#800080"))
self.rect = self.image.get_rect(topleft=pos)
self.hasControl = False
self.hide = True # change to self.visible
self.textFont = pygame.font.SysFont("Comic Sans", 35)
self.screen = surface
#add hide() and show() functions.
def draw(self):
self.screen.blit(self.image, self.rect)
def update(self):
pressed = pygame.key.get_pressed()
up = pressed[pygame.K_UP]
down = pressed[pygame.K_DOWN]
if self.hasControl:
if up:
print("Up key pressed on dialog")
if down:
print("Down key pressed on dialog")
#update texts in the dialog here i guess?
return
def loadText(self, key):
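        # Word-wrap the dialogue text for this key onto the image surface:
        # scan backwards from the 75-character row limit to the last space,
        # render that slice, drop down 32 pixels, and repeat with the rest.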
rowLength = 75
text = self.dialogues[key]
print(len(text))
n = len(text)
start = 0
end = 0
rowCounter = 0
while n > rowLength:
#find the end.
for k in range (rowLength+start-1, start, -1):
if text[k] == " ":
end = k
break
print(len(text[start:end]))
textSurface = self.textFont.render(text[start:end], True, (255,255,255))
self.image.blit(textSurface, (0,rowCounter))
rowCounter += 32
truncatedTextLength = end - start + 1
n -= truncatedTextLength
start = end + 1
#add remainder to next row.
textSurface = self.textFont.render(text[start:len(text)], True, (255,255,255))
self.image.blit(textSurface, (0, rowCounter))
print(text)
def read(self, file):
#read file and store in self.texts
infile = open(file, 'r')
keys = []
texts = []
currText = ""
startStoring = False
for line in infile:
line = line.strip('\n')
line = line.split()
if len(line) > 0:
if line[0] == "-":
keys.append(line[1])
startStoring = True
elif line[0] == "*":
texts.append(currText)
currText = ""
startStoring = False
else:
if startStoring:
for k in range (len(line)):
currText += line[k]
if k != len(line)-1:
currText += " "
for i in range (len(keys)):
self.dialogues[keys[i]] = texts[i]
def main():
pygame.init()
#init font
pygame.font.init()
screen = pygame.display.set_mode((800, 640))
timer = pygame.time.Clock()
gameDialog = Dialog((0, 442), screen)
gameDialog.read("dialogues.txt")
for key, val in gameDialog.dialogues.items():
print("----")
print("KEY: " + str(key))
print("Value: ")
print(val)
print("Text of plant 1.0: " )
#gameDialog.displayDialogue("-plant1.0-")
print(" " )
print("text of -intro1.1-")
gameDialog.loadText("-intro1.1-")
while True:
for e in pygame.event.get():
if e.type == pygame.QUIT:
return
if e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:
return
if e.type == pygame.KEYDOWN and e.key == pygame.K_z:
gameDialog.hide = False
gameDialog.hasControl = True
if e.type == pygame.KEYDOWN and e.key == pygame.K_x:
gameDialog.hasControl = False
gameDialog.hide = True
screen.fill((0, 0, 0))
gameDialog.update()
if not gameDialog.hide:
gameDialog.draw()
pygame.display.update()
timer.tick(60)
if __name__ == "__main__":
main()
|
[
"import pygame\nfrom pygame import *\n\nSCREEN_SIZE = pygame.Rect((0, 0, 800, 640))\n\nclass Dialog():\n def __init__(self, pos, surface):\n self.dialogues = dict()\n self.image = Surface((SCREEN_SIZE.width, SCREEN_SIZE.height))\n self.image.fill(Color(\"#800080\"))\n self.rect = self.image.get_rect(topleft=pos)\n self.hasControl = False\n self.hide = True # change to self.visible\n self.textFont = pygame.font.SysFont(\"Comic Sans\", 35)\n self.screen = surface\n\n #add hide() and show() functions.\n def draw(self):\n self.screen.blit(self.image, self.rect)\n\n def update(self):\n pressed = pygame.key.get_pressed()\n up = pressed[pygame.K_UP]\n down = pressed[pygame.K_DOWN]\n \n if self.hasControl:\n if up:\n print(\"Up key pressed on dialog\")\n if down:\n print(\"Down key pressed on dialog\")\n \n #update texts in the dialog here i guess?\n return\n\n def loadText(self, key):\n \n\n rowLength = 75\n text = self.dialogues[key]\n print(len(text))\n \n n = len(text)\n start = 0\n end = 0\n rowCounter = 0\n while n > rowLength:\n #find the end.\n for k in range (rowLength+start-1, start, -1):\n if text[k] == \" \":\n end = k\n break\n print(len(text[start:end]))\n textSurface = self.textFont.render(text[start:end], True, (255,255,255))\n self.image.blit(textSurface, (0,rowCounter))\n\n rowCounter += 32\n\n truncatedTextLength = end - start + 1\n n -= truncatedTextLength\n \n start = end + 1\n #add remainder to next row.\n textSurface = self.textFont.render(text[start:len(text)], True, (255,255,255))\n self.image.blit(textSurface, (0, rowCounter))\n \n\n \n \n \n \n print(text)\n\n \n \n\n def read(self, file):\n #read file and store in self.texts\n infile = open(file, 'r')\n keys = []\n texts = []\n currText = \"\"\n startStoring = False\n for line in infile:\n line = line.strip('\\n')\n line = line.split()\n \n if len(line) > 0:\n if line[0] == \"-\":\n keys.append(line[1])\n startStoring = True\n \n elif line[0] == \"*\":\n texts.append(currText)\n currText = \"\"\n startStoring = False\n else:\n if startStoring:\n for k in range (len(line)):\n currText += line[k]\n\n if k != len(line)-1:\n currText += \" \"\n\n for i in range (len(keys)):\n self.dialogues[keys[i]] = texts[i]\n\n \n \n \n\n\ndef main():\n\n pygame.init()\n #init font\n pygame.font.init() \n screen = pygame.display.set_mode((800, 640))\n timer = pygame.time.Clock()\n gameDialog = Dialog((0, 442), screen)\n gameDialog.read(\"dialogues.txt\")\n for key, val in gameDialog.dialogues.items():\n print(\"----\")\n print(\"KEY: \" + str(key))\n print(\"Value: \")\n print(val)\n \n print(\"Text of plant 1.0: \" )\n #gameDialog.displayDialogue(\"-plant1.0-\")\n\n print(\" \" )\n\n print(\"text of -intro1.1-\")\n gameDialog.loadText(\"-intro1.1-\")\n \n \n \n while True:\n for e in pygame.event.get():\n if e.type == pygame.QUIT:\n return\n if e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:\n return\n\n if e.type == pygame.KEYDOWN and e.key == pygame.K_z:\n gameDialog.hide = False\n gameDialog.hasControl = True\n if e.type == pygame.KEYDOWN and e.key == pygame.K_x:\n gameDialog.hasControl = False\n gameDialog.hide = True\n\n screen.fill((0, 0, 0)) \n\n gameDialog.update()\n if not gameDialog.hide:\n gameDialog.draw()\n \n\n pygame.display.update()\n timer.tick(60)\n \n\n\n \nif __name__ == \"__main__\":\n main()\n",
"import pygame\nfrom pygame import *\nSCREEN_SIZE = pygame.Rect((0, 0, 800, 640))\n\n\nclass Dialog:\n\n def __init__(self, pos, surface):\n self.dialogues = dict()\n self.image = Surface((SCREEN_SIZE.width, SCREEN_SIZE.height))\n self.image.fill(Color('#800080'))\n self.rect = self.image.get_rect(topleft=pos)\n self.hasControl = False\n self.hide = True\n self.textFont = pygame.font.SysFont('Comic Sans', 35)\n self.screen = surface\n\n def draw(self):\n self.screen.blit(self.image, self.rect)\n\n def update(self):\n pressed = pygame.key.get_pressed()\n up = pressed[pygame.K_UP]\n down = pressed[pygame.K_DOWN]\n if self.hasControl:\n if up:\n print('Up key pressed on dialog')\n if down:\n print('Down key pressed on dialog')\n return\n\n def loadText(self, key):\n rowLength = 75\n text = self.dialogues[key]\n print(len(text))\n n = len(text)\n start = 0\n end = 0\n rowCounter = 0\n while n > rowLength:\n for k in range(rowLength + start - 1, start, -1):\n if text[k] == ' ':\n end = k\n break\n print(len(text[start:end]))\n textSurface = self.textFont.render(text[start:end], True, (255,\n 255, 255))\n self.image.blit(textSurface, (0, rowCounter))\n rowCounter += 32\n truncatedTextLength = end - start + 1\n n -= truncatedTextLength\n start = end + 1\n textSurface = self.textFont.render(text[start:len(text)], True, (\n 255, 255, 255))\n self.image.blit(textSurface, (0, rowCounter))\n print(text)\n\n def read(self, file):\n infile = open(file, 'r')\n keys = []\n texts = []\n currText = ''\n startStoring = False\n for line in infile:\n line = line.strip('\\n')\n line = line.split()\n if len(line) > 0:\n if line[0] == '-':\n keys.append(line[1])\n startStoring = True\n elif line[0] == '*':\n texts.append(currText)\n currText = ''\n startStoring = False\n elif startStoring:\n for k in range(len(line)):\n currText += line[k]\n if k != len(line) - 1:\n currText += ' '\n for i in range(len(keys)):\n self.dialogues[keys[i]] = texts[i]\n\n\ndef main():\n pygame.init()\n pygame.font.init()\n screen = pygame.display.set_mode((800, 640))\n timer = pygame.time.Clock()\n gameDialog = Dialog((0, 442), screen)\n gameDialog.read('dialogues.txt')\n for key, val in gameDialog.dialogues.items():\n print('----')\n print('KEY: ' + str(key))\n print('Value: ')\n print(val)\n print('Text of plant 1.0: ')\n print(' ')\n print('text of -intro1.1-')\n gameDialog.loadText('-intro1.1-')\n while True:\n for e in pygame.event.get():\n if e.type == pygame.QUIT:\n return\n if e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:\n return\n if e.type == pygame.KEYDOWN and e.key == pygame.K_z:\n gameDialog.hide = False\n gameDialog.hasControl = True\n if e.type == pygame.KEYDOWN and e.key == pygame.K_x:\n gameDialog.hasControl = False\n gameDialog.hide = True\n screen.fill((0, 0, 0))\n gameDialog.update()\n if not gameDialog.hide:\n gameDialog.draw()\n pygame.display.update()\n timer.tick(60)\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\nSCREEN_SIZE = pygame.Rect((0, 0, 800, 640))\n\n\nclass Dialog:\n\n def __init__(self, pos, surface):\n self.dialogues = dict()\n self.image = Surface((SCREEN_SIZE.width, SCREEN_SIZE.height))\n self.image.fill(Color('#800080'))\n self.rect = self.image.get_rect(topleft=pos)\n self.hasControl = False\n self.hide = True\n self.textFont = pygame.font.SysFont('Comic Sans', 35)\n self.screen = surface\n\n def draw(self):\n self.screen.blit(self.image, self.rect)\n\n def update(self):\n pressed = pygame.key.get_pressed()\n up = pressed[pygame.K_UP]\n down = pressed[pygame.K_DOWN]\n if self.hasControl:\n if up:\n print('Up key pressed on dialog')\n if down:\n print('Down key pressed on dialog')\n return\n\n def loadText(self, key):\n rowLength = 75\n text = self.dialogues[key]\n print(len(text))\n n = len(text)\n start = 0\n end = 0\n rowCounter = 0\n while n > rowLength:\n for k in range(rowLength + start - 1, start, -1):\n if text[k] == ' ':\n end = k\n break\n print(len(text[start:end]))\n textSurface = self.textFont.render(text[start:end], True, (255,\n 255, 255))\n self.image.blit(textSurface, (0, rowCounter))\n rowCounter += 32\n truncatedTextLength = end - start + 1\n n -= truncatedTextLength\n start = end + 1\n textSurface = self.textFont.render(text[start:len(text)], True, (\n 255, 255, 255))\n self.image.blit(textSurface, (0, rowCounter))\n print(text)\n\n def read(self, file):\n infile = open(file, 'r')\n keys = []\n texts = []\n currText = ''\n startStoring = False\n for line in infile:\n line = line.strip('\\n')\n line = line.split()\n if len(line) > 0:\n if line[0] == '-':\n keys.append(line[1])\n startStoring = True\n elif line[0] == '*':\n texts.append(currText)\n currText = ''\n startStoring = False\n elif startStoring:\n for k in range(len(line)):\n currText += line[k]\n if k != len(line) - 1:\n currText += ' '\n for i in range(len(keys)):\n self.dialogues[keys[i]] = texts[i]\n\n\ndef main():\n pygame.init()\n pygame.font.init()\n screen = pygame.display.set_mode((800, 640))\n timer = pygame.time.Clock()\n gameDialog = Dialog((0, 442), screen)\n gameDialog.read('dialogues.txt')\n for key, val in gameDialog.dialogues.items():\n print('----')\n print('KEY: ' + str(key))\n print('Value: ')\n print(val)\n print('Text of plant 1.0: ')\n print(' ')\n print('text of -intro1.1-')\n gameDialog.loadText('-intro1.1-')\n while True:\n for e in pygame.event.get():\n if e.type == pygame.QUIT:\n return\n if e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:\n return\n if e.type == pygame.KEYDOWN and e.key == pygame.K_z:\n gameDialog.hide = False\n gameDialog.hasControl = True\n if e.type == pygame.KEYDOWN and e.key == pygame.K_x:\n gameDialog.hasControl = False\n gameDialog.hide = True\n screen.fill((0, 0, 0))\n gameDialog.update()\n if not gameDialog.hide:\n gameDialog.draw()\n pygame.display.update()\n timer.tick(60)\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n<assignment token>\n\n\nclass Dialog:\n\n def __init__(self, pos, surface):\n self.dialogues = dict()\n self.image = Surface((SCREEN_SIZE.width, SCREEN_SIZE.height))\n self.image.fill(Color('#800080'))\n self.rect = self.image.get_rect(topleft=pos)\n self.hasControl = False\n self.hide = True\n self.textFont = pygame.font.SysFont('Comic Sans', 35)\n self.screen = surface\n\n def draw(self):\n self.screen.blit(self.image, self.rect)\n\n def update(self):\n pressed = pygame.key.get_pressed()\n up = pressed[pygame.K_UP]\n down = pressed[pygame.K_DOWN]\n if self.hasControl:\n if up:\n print('Up key pressed on dialog')\n if down:\n print('Down key pressed on dialog')\n return\n\n def loadText(self, key):\n rowLength = 75\n text = self.dialogues[key]\n print(len(text))\n n = len(text)\n start = 0\n end = 0\n rowCounter = 0\n while n > rowLength:\n for k in range(rowLength + start - 1, start, -1):\n if text[k] == ' ':\n end = k\n break\n print(len(text[start:end]))\n textSurface = self.textFont.render(text[start:end], True, (255,\n 255, 255))\n self.image.blit(textSurface, (0, rowCounter))\n rowCounter += 32\n truncatedTextLength = end - start + 1\n n -= truncatedTextLength\n start = end + 1\n textSurface = self.textFont.render(text[start:len(text)], True, (\n 255, 255, 255))\n self.image.blit(textSurface, (0, rowCounter))\n print(text)\n\n def read(self, file):\n infile = open(file, 'r')\n keys = []\n texts = []\n currText = ''\n startStoring = False\n for line in infile:\n line = line.strip('\\n')\n line = line.split()\n if len(line) > 0:\n if line[0] == '-':\n keys.append(line[1])\n startStoring = True\n elif line[0] == '*':\n texts.append(currText)\n currText = ''\n startStoring = False\n elif startStoring:\n for k in range(len(line)):\n currText += line[k]\n if k != len(line) - 1:\n currText += ' '\n for i in range(len(keys)):\n self.dialogues[keys[i]] = texts[i]\n\n\ndef main():\n pygame.init()\n pygame.font.init()\n screen = pygame.display.set_mode((800, 640))\n timer = pygame.time.Clock()\n gameDialog = Dialog((0, 442), screen)\n gameDialog.read('dialogues.txt')\n for key, val in gameDialog.dialogues.items():\n print('----')\n print('KEY: ' + str(key))\n print('Value: ')\n print(val)\n print('Text of plant 1.0: ')\n print(' ')\n print('text of -intro1.1-')\n gameDialog.loadText('-intro1.1-')\n while True:\n for e in pygame.event.get():\n if e.type == pygame.QUIT:\n return\n if e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:\n return\n if e.type == pygame.KEYDOWN and e.key == pygame.K_z:\n gameDialog.hide = False\n gameDialog.hasControl = True\n if e.type == pygame.KEYDOWN and e.key == pygame.K_x:\n gameDialog.hasControl = False\n gameDialog.hide = True\n screen.fill((0, 0, 0))\n gameDialog.update()\n if not gameDialog.hide:\n gameDialog.draw()\n pygame.display.update()\n timer.tick(60)\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n<assignment token>\n\n\nclass Dialog:\n\n def __init__(self, pos, surface):\n self.dialogues = dict()\n self.image = Surface((SCREEN_SIZE.width, SCREEN_SIZE.height))\n self.image.fill(Color('#800080'))\n self.rect = self.image.get_rect(topleft=pos)\n self.hasControl = False\n self.hide = True\n self.textFont = pygame.font.SysFont('Comic Sans', 35)\n self.screen = surface\n\n def draw(self):\n self.screen.blit(self.image, self.rect)\n\n def update(self):\n pressed = pygame.key.get_pressed()\n up = pressed[pygame.K_UP]\n down = pressed[pygame.K_DOWN]\n if self.hasControl:\n if up:\n print('Up key pressed on dialog')\n if down:\n print('Down key pressed on dialog')\n return\n\n def loadText(self, key):\n rowLength = 75\n text = self.dialogues[key]\n print(len(text))\n n = len(text)\n start = 0\n end = 0\n rowCounter = 0\n while n > rowLength:\n for k in range(rowLength + start - 1, start, -1):\n if text[k] == ' ':\n end = k\n break\n print(len(text[start:end]))\n textSurface = self.textFont.render(text[start:end], True, (255,\n 255, 255))\n self.image.blit(textSurface, (0, rowCounter))\n rowCounter += 32\n truncatedTextLength = end - start + 1\n n -= truncatedTextLength\n start = end + 1\n textSurface = self.textFont.render(text[start:len(text)], True, (\n 255, 255, 255))\n self.image.blit(textSurface, (0, rowCounter))\n print(text)\n\n def read(self, file):\n infile = open(file, 'r')\n keys = []\n texts = []\n currText = ''\n startStoring = False\n for line in infile:\n line = line.strip('\\n')\n line = line.split()\n if len(line) > 0:\n if line[0] == '-':\n keys.append(line[1])\n startStoring = True\n elif line[0] == '*':\n texts.append(currText)\n currText = ''\n startStoring = False\n elif startStoring:\n for k in range(len(line)):\n currText += line[k]\n if k != len(line) - 1:\n currText += ' '\n for i in range(len(keys)):\n self.dialogues[keys[i]] = texts[i]\n\n\ndef main():\n pygame.init()\n pygame.font.init()\n screen = pygame.display.set_mode((800, 640))\n timer = pygame.time.Clock()\n gameDialog = Dialog((0, 442), screen)\n gameDialog.read('dialogues.txt')\n for key, val in gameDialog.dialogues.items():\n print('----')\n print('KEY: ' + str(key))\n print('Value: ')\n print(val)\n print('Text of plant 1.0: ')\n print(' ')\n print('text of -intro1.1-')\n gameDialog.loadText('-intro1.1-')\n while True:\n for e in pygame.event.get():\n if e.type == pygame.QUIT:\n return\n if e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:\n return\n if e.type == pygame.KEYDOWN and e.key == pygame.K_z:\n gameDialog.hide = False\n gameDialog.hasControl = True\n if e.type == pygame.KEYDOWN and e.key == pygame.K_x:\n gameDialog.hasControl = False\n gameDialog.hide = True\n screen.fill((0, 0, 0))\n gameDialog.update()\n if not gameDialog.hide:\n gameDialog.draw()\n pygame.display.update()\n timer.tick(60)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Dialog:\n\n def __init__(self, pos, surface):\n self.dialogues = dict()\n self.image = Surface((SCREEN_SIZE.width, SCREEN_SIZE.height))\n self.image.fill(Color('#800080'))\n self.rect = self.image.get_rect(topleft=pos)\n self.hasControl = False\n self.hide = True\n self.textFont = pygame.font.SysFont('Comic Sans', 35)\n self.screen = surface\n\n def draw(self):\n self.screen.blit(self.image, self.rect)\n\n def update(self):\n pressed = pygame.key.get_pressed()\n up = pressed[pygame.K_UP]\n down = pressed[pygame.K_DOWN]\n if self.hasControl:\n if up:\n print('Up key pressed on dialog')\n if down:\n print('Down key pressed on dialog')\n return\n\n def loadText(self, key):\n rowLength = 75\n text = self.dialogues[key]\n print(len(text))\n n = len(text)\n start = 0\n end = 0\n rowCounter = 0\n while n > rowLength:\n for k in range(rowLength + start - 1, start, -1):\n if text[k] == ' ':\n end = k\n break\n print(len(text[start:end]))\n textSurface = self.textFont.render(text[start:end], True, (255,\n 255, 255))\n self.image.blit(textSurface, (0, rowCounter))\n rowCounter += 32\n truncatedTextLength = end - start + 1\n n -= truncatedTextLength\n start = end + 1\n textSurface = self.textFont.render(text[start:len(text)], True, (\n 255, 255, 255))\n self.image.blit(textSurface, (0, rowCounter))\n print(text)\n\n def read(self, file):\n infile = open(file, 'r')\n keys = []\n texts = []\n currText = ''\n startStoring = False\n for line in infile:\n line = line.strip('\\n')\n line = line.split()\n if len(line) > 0:\n if line[0] == '-':\n keys.append(line[1])\n startStoring = True\n elif line[0] == '*':\n texts.append(currText)\n currText = ''\n startStoring = False\n elif startStoring:\n for k in range(len(line)):\n currText += line[k]\n if k != len(line) - 1:\n currText += ' '\n for i in range(len(keys)):\n self.dialogues[keys[i]] = texts[i]\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Dialog:\n <function token>\n\n def draw(self):\n self.screen.blit(self.image, self.rect)\n\n def update(self):\n pressed = pygame.key.get_pressed()\n up = pressed[pygame.K_UP]\n down = pressed[pygame.K_DOWN]\n if self.hasControl:\n if up:\n print('Up key pressed on dialog')\n if down:\n print('Down key pressed on dialog')\n return\n\n def loadText(self, key):\n rowLength = 75\n text = self.dialogues[key]\n print(len(text))\n n = len(text)\n start = 0\n end = 0\n rowCounter = 0\n while n > rowLength:\n for k in range(rowLength + start - 1, start, -1):\n if text[k] == ' ':\n end = k\n break\n print(len(text[start:end]))\n textSurface = self.textFont.render(text[start:end], True, (255,\n 255, 255))\n self.image.blit(textSurface, (0, rowCounter))\n rowCounter += 32\n truncatedTextLength = end - start + 1\n n -= truncatedTextLength\n start = end + 1\n textSurface = self.textFont.render(text[start:len(text)], True, (\n 255, 255, 255))\n self.image.blit(textSurface, (0, rowCounter))\n print(text)\n\n def read(self, file):\n infile = open(file, 'r')\n keys = []\n texts = []\n currText = ''\n startStoring = False\n for line in infile:\n line = line.strip('\\n')\n line = line.split()\n if len(line) > 0:\n if line[0] == '-':\n keys.append(line[1])\n startStoring = True\n elif line[0] == '*':\n texts.append(currText)\n currText = ''\n startStoring = False\n elif startStoring:\n for k in range(len(line)):\n currText += line[k]\n if k != len(line) - 1:\n currText += ' '\n for i in range(len(keys)):\n self.dialogues[keys[i]] = texts[i]\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Dialog:\n <function token>\n\n def draw(self):\n self.screen.blit(self.image, self.rect)\n\n def update(self):\n pressed = pygame.key.get_pressed()\n up = pressed[pygame.K_UP]\n down = pressed[pygame.K_DOWN]\n if self.hasControl:\n if up:\n print('Up key pressed on dialog')\n if down:\n print('Down key pressed on dialog')\n return\n\n def loadText(self, key):\n rowLength = 75\n text = self.dialogues[key]\n print(len(text))\n n = len(text)\n start = 0\n end = 0\n rowCounter = 0\n while n > rowLength:\n for k in range(rowLength + start - 1, start, -1):\n if text[k] == ' ':\n end = k\n break\n print(len(text[start:end]))\n textSurface = self.textFont.render(text[start:end], True, (255,\n 255, 255))\n self.image.blit(textSurface, (0, rowCounter))\n rowCounter += 32\n truncatedTextLength = end - start + 1\n n -= truncatedTextLength\n start = end + 1\n textSurface = self.textFont.render(text[start:len(text)], True, (\n 255, 255, 255))\n self.image.blit(textSurface, (0, rowCounter))\n print(text)\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Dialog:\n <function token>\n\n def draw(self):\n self.screen.blit(self.image, self.rect)\n <function token>\n\n def loadText(self, key):\n rowLength = 75\n text = self.dialogues[key]\n print(len(text))\n n = len(text)\n start = 0\n end = 0\n rowCounter = 0\n while n > rowLength:\n for k in range(rowLength + start - 1, start, -1):\n if text[k] == ' ':\n end = k\n break\n print(len(text[start:end]))\n textSurface = self.textFont.render(text[start:end], True, (255,\n 255, 255))\n self.image.blit(textSurface, (0, rowCounter))\n rowCounter += 32\n truncatedTextLength = end - start + 1\n n -= truncatedTextLength\n start = end + 1\n textSurface = self.textFont.render(text[start:len(text)], True, (\n 255, 255, 255))\n self.image.blit(textSurface, (0, rowCounter))\n print(text)\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Dialog:\n <function token>\n\n def draw(self):\n self.screen.blit(self.image, self.rect)\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Dialog:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<function token>\n<code token>\n"
] | false |
98,687 |
a0d8748f1d19be38575b7f74cacf8a46b1970df5
|
from collections import Counter
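# Each value in A is the id of someone's direct boss, so counting how often
# every id occurs gives each member's number of direct subordinates.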
N = int(input())
A = list(map(int, input().split()))
subordinates = [0] * N
boss_and_cnt = Counter(A)
for boss, cnt in boss_and_cnt.items():
subordinates[boss - 1] = cnt
for s in subordinates:
print(s)
|
[
"from collections import Counter\nN = int(input())\nA = list(map(int, input().split()))\nsubordinates = [0] * N\nboss_and_cnt = Counter(A)\nfor boss, cnt in boss_and_cnt.items():\n subordinates[boss - 1] = cnt\nfor s in subordinates:\n print(s)",
"from collections import Counter\nN = int(input())\nA = list(map(int, input().split()))\nsubordinates = [0] * N\nboss_and_cnt = Counter(A)\nfor boss, cnt in boss_and_cnt.items():\n subordinates[boss - 1] = cnt\nfor s in subordinates:\n print(s)\n",
"<import token>\nN = int(input())\nA = list(map(int, input().split()))\nsubordinates = [0] * N\nboss_and_cnt = Counter(A)\nfor boss, cnt in boss_and_cnt.items():\n subordinates[boss - 1] = cnt\nfor s in subordinates:\n print(s)\n",
"<import token>\n<assignment token>\nfor boss, cnt in boss_and_cnt.items():\n subordinates[boss - 1] = cnt\nfor s in subordinates:\n print(s)\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
98,688 |
9b1481271b79190d37e5154ca9f52f893a5c49fd
|
from collections import defaultdict
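# Breadth-first search over a movie-similarity graph: movies already being
# watched start at distance 0, each similar movie is one step further, and
# the id with the largest (possibly still infinite) distance is printed.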
n, h, l = map(int, input().split())
current = list(map(int, input().split()))
next_movies = []
score = {i: float('inf') for i in range(n)}
similar = defaultdict(list)
for c in current:
score[c] = 0
for _ in range(l):
a, b = map(int, input().split())
similar[a].append(b)
similar[b].append(a)
while True:
for c in current:
for f in similar[c]:
if score[f] == float('inf'):
score[f] = score[c] + 1
next_movies.append(f)
if next_movies == current:
break
current = next_movies.copy()
score = sorted(score.items(), key=lambda x: -x[1])
print(score[0][0])
|
[
"from collections import defaultdict\n\nn, h, l = map(int, input().split())\ncurrent = list(map(int, input().split()))\nnext_movies = []\nscore = {i: float('inf') for i in range(n)}\nsimilar = defaultdict(list)\nfor c in current:\n score[c] = 0\nfor _ in range(l):\n a, b = map(int, input().split())\n similar[a].append(b)\n similar[b].append(a)\n\nwhile True:\n for c in current:\n for f in similar[c]:\n if score[f] == float('inf'):\n score[f] = score[c] + 1\n next_movies.append(f)\n if next_movies == current:\n break\n current = next_movies.copy()\n\nscore = sorted(score.items(), key=lambda x: -x[1])\nprint(score[0][0])\n",
"from collections import defaultdict\nn, h, l = map(int, input().split())\ncurrent = list(map(int, input().split()))\nnext_movies = []\nscore = {i: float('inf') for i in range(n)}\nsimilar = defaultdict(list)\nfor c in current:\n score[c] = 0\nfor _ in range(l):\n a, b = map(int, input().split())\n similar[a].append(b)\n similar[b].append(a)\nwhile True:\n for c in current:\n for f in similar[c]:\n if score[f] == float('inf'):\n score[f] = score[c] + 1\n next_movies.append(f)\n if next_movies == current:\n break\n current = next_movies.copy()\nscore = sorted(score.items(), key=lambda x: -x[1])\nprint(score[0][0])\n",
"<import token>\nn, h, l = map(int, input().split())\ncurrent = list(map(int, input().split()))\nnext_movies = []\nscore = {i: float('inf') for i in range(n)}\nsimilar = defaultdict(list)\nfor c in current:\n score[c] = 0\nfor _ in range(l):\n a, b = map(int, input().split())\n similar[a].append(b)\n similar[b].append(a)\nwhile True:\n for c in current:\n for f in similar[c]:\n if score[f] == float('inf'):\n score[f] = score[c] + 1\n next_movies.append(f)\n if next_movies == current:\n break\n current = next_movies.copy()\nscore = sorted(score.items(), key=lambda x: -x[1])\nprint(score[0][0])\n",
"<import token>\n<assignment token>\nfor c in current:\n score[c] = 0\nfor _ in range(l):\n a, b = map(int, input().split())\n similar[a].append(b)\n similar[b].append(a)\nwhile True:\n for c in current:\n for f in similar[c]:\n if score[f] == float('inf'):\n score[f] = score[c] + 1\n next_movies.append(f)\n if next_movies == current:\n break\n current = next_movies.copy()\n<assignment token>\nprint(score[0][0])\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,689 |
594c62fcd301ccbfb5aceeb7770210737c5c7724
|
a=('cats','cats','cats','cats','dogs','horses')
b=a.count('cats')
print(b)
|
[
"a=('cats','cats','cats','cats','dogs','horses')\nb=a.count('cats')\nprint(b)\n",
"a = 'cats', 'cats', 'cats', 'cats', 'dogs', 'horses'\nb = a.count('cats')\nprint(b)\n",
"<assignment token>\nprint(b)\n",
"<assignment token>\n<code token>\n"
] | false |
98,690 |
085822f5c35fd60d4d32a35ae4de288d398cedc8
|
# Generated by Django 3.0.5 on 2020-04-13 09:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0002_auto_20200412_1826'),
]
operations = [
migrations.AlterField(
model_name='item',
name='qty',
field=models.CharField(default='', max_length=100),
),
]
|
[
"# Generated by Django 3.0.5 on 2020-04-13 09:06\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0002_auto_20200412_1826'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='item',\n name='qty',\n field=models.CharField(default='', max_length=100),\n ),\n ]\n",
"from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('main', '0002_auto_20200412_1826')]\n operations = [migrations.AlterField(model_name='item', name='qty',\n field=models.CharField(default='', max_length=100))]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('main', '0002_auto_20200412_1826')]\n operations = [migrations.AlterField(model_name='item', name='qty',\n field=models.CharField(default='', max_length=100))]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
98,691 |
48bbe86f00deb37a0ea095772c7d079953fd5ee9
|
# -*- coding: utf-8 -*-
# @Date : 2018-12-12
# @Author : Peng Shiyu
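# A Scrapy extension (typically enabled via the EXTENSIONS setting) that, when
# a spider closes, gathers its crawl statistics and POSTs them as JSON to
# STATS_COLLECTION_URL.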
import requests
from scrapy import signals
from scrapy_util.logger import logger
from scrapy_util.utils import ScrapydUtil
class StatsCollectorExtension(object):
"""
    Log recording extension
"""
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
def __init__(self, crawler, log_file=None, stats_collection_url=None):
self.stats_collection_url = stats_collection_url
self.log_file = log_file
crawler.signals.connect(self.spider_closed, signal=signals.spider_closed)
@classmethod
def from_crawler(cls, crawler):
log_file = crawler.settings.get("LOG_FILE")
stats_collection_url = crawler.settings.get("STATS_COLLECTION_URL")
if stats_collection_url is None:
raise Exception('STATS_COLLECTION_URL not in settings')
return cls(crawler, log_file=log_file, stats_collection_url=stats_collection_url)
def spider_closed(self, spider, reason):
stats = spider.crawler.stats.get_stats()
        # get the data
start_time = stats.get("start_time")
finish_time = stats.get("finish_time")
duration = (finish_time - start_time).seconds
        # save the collected information
result = ScrapydUtil.parse_log_file(self.log_file)
item = {
"job_id": result.get('job_id', ''),
"project": result.get('project', ''),
"spider": spider.name,
"item_scraped_count": stats.get("item_scraped_count", 0),
"item_dropped_count": stats.get("item_dropped_count", 0),
"start_time": start_time.strftime(self.DATETIME_FORMAT),
"finish_time": finish_time.strftime(self.DATETIME_FORMAT),
"duration": duration,
"finish_reason": stats.get("finish_reason"),
"log_error_count": stats.get("log_count/ERROR", 0),
}
logger.info(item)
self.collection_item(item)
    def collection_item(self, item):
        """Process the collected data and submit it as JSON"""
res = requests.post(self.stats_collection_url, json=item)
logger.info(res.text)
|
[
"# -*- coding: utf-8 -*-\n\n# @Date : 2018-12-12\n# @Author : Peng Shiyu\n\nimport requests\nfrom scrapy import signals\n\nfrom scrapy_util.logger import logger\nfrom scrapy_util.utils import ScrapydUtil\n\n\nclass StatsCollectorExtension(object):\n \"\"\"\n 日志记录扩展\n \"\"\"\n DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n\n def __init__(self, crawler, log_file=None, stats_collection_url=None):\n self.stats_collection_url = stats_collection_url\n self.log_file = log_file\n\n crawler.signals.connect(self.spider_closed, signal=signals.spider_closed)\n\n @classmethod\n def from_crawler(cls, crawler):\n log_file = crawler.settings.get(\"LOG_FILE\")\n stats_collection_url = crawler.settings.get(\"STATS_COLLECTION_URL\")\n\n if stats_collection_url is None:\n raise Exception('STATS_COLLECTION_URL not in settings')\n\n return cls(crawler, log_file=log_file, stats_collection_url=stats_collection_url)\n\n def spider_closed(self, spider, reason):\n stats = spider.crawler.stats.get_stats()\n\n # 获取数据\n start_time = stats.get(\"start_time\")\n finish_time = stats.get(\"finish_time\")\n duration = (finish_time - start_time).seconds\n\n # 保存收集到的信息\n result = ScrapydUtil.parse_log_file(self.log_file)\n\n item = {\n \"job_id\": result.get('job_id', ''),\n \"project\": result.get('project', ''),\n \"spider\": spider.name,\n \"item_scraped_count\": stats.get(\"item_scraped_count\", 0),\n \"item_dropped_count\": stats.get(\"item_dropped_count\", 0),\n \"start_time\": start_time.strftime(self.DATETIME_FORMAT),\n \"finish_time\": finish_time.strftime(self.DATETIME_FORMAT),\n \"duration\": duration,\n \"finish_reason\": stats.get(\"finish_reason\"),\n \"log_error_count\": stats.get(\"log_count/ERROR\", 0),\n }\n\n logger.info(item)\n\n self.collection_item(item)\n\n def collection_item(self, item):\n \"\"\"处理收集到的数据,以json 形式提交\"\"\"\n res = requests.post(self.stats_collection_url, json=item)\n logger.info(res.text)\n\n",
"import requests\nfrom scrapy import signals\nfrom scrapy_util.logger import logger\nfrom scrapy_util.utils import ScrapydUtil\n\n\nclass StatsCollectorExtension(object):\n \"\"\"\n 日志记录扩展\n \"\"\"\n DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'\n\n def __init__(self, crawler, log_file=None, stats_collection_url=None):\n self.stats_collection_url = stats_collection_url\n self.log_file = log_file\n crawler.signals.connect(self.spider_closed, signal=signals.\n spider_closed)\n\n @classmethod\n def from_crawler(cls, crawler):\n log_file = crawler.settings.get('LOG_FILE')\n stats_collection_url = crawler.settings.get('STATS_COLLECTION_URL')\n if stats_collection_url is None:\n raise Exception('STATS_COLLECTION_URL not in settings')\n return cls(crawler, log_file=log_file, stats_collection_url=\n stats_collection_url)\n\n def spider_closed(self, spider, reason):\n stats = spider.crawler.stats.get_stats()\n start_time = stats.get('start_time')\n finish_time = stats.get('finish_time')\n duration = (finish_time - start_time).seconds\n result = ScrapydUtil.parse_log_file(self.log_file)\n item = {'job_id': result.get('job_id', ''), 'project': result.get(\n 'project', ''), 'spider': spider.name, 'item_scraped_count':\n stats.get('item_scraped_count', 0), 'item_dropped_count': stats\n .get('item_dropped_count', 0), 'start_time': start_time.\n strftime(self.DATETIME_FORMAT), 'finish_time': finish_time.\n strftime(self.DATETIME_FORMAT), 'duration': duration,\n 'finish_reason': stats.get('finish_reason'), 'log_error_count':\n stats.get('log_count/ERROR', 0)}\n logger.info(item)\n self.collection_item(item)\n\n def collection_item(self, item):\n \"\"\"处理收集到的数据,以json 形式提交\"\"\"\n res = requests.post(self.stats_collection_url, json=item)\n logger.info(res.text)\n",
"<import token>\n\n\nclass StatsCollectorExtension(object):\n \"\"\"\n 日志记录扩展\n \"\"\"\n DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'\n\n def __init__(self, crawler, log_file=None, stats_collection_url=None):\n self.stats_collection_url = stats_collection_url\n self.log_file = log_file\n crawler.signals.connect(self.spider_closed, signal=signals.\n spider_closed)\n\n @classmethod\n def from_crawler(cls, crawler):\n log_file = crawler.settings.get('LOG_FILE')\n stats_collection_url = crawler.settings.get('STATS_COLLECTION_URL')\n if stats_collection_url is None:\n raise Exception('STATS_COLLECTION_URL not in settings')\n return cls(crawler, log_file=log_file, stats_collection_url=\n stats_collection_url)\n\n def spider_closed(self, spider, reason):\n stats = spider.crawler.stats.get_stats()\n start_time = stats.get('start_time')\n finish_time = stats.get('finish_time')\n duration = (finish_time - start_time).seconds\n result = ScrapydUtil.parse_log_file(self.log_file)\n item = {'job_id': result.get('job_id', ''), 'project': result.get(\n 'project', ''), 'spider': spider.name, 'item_scraped_count':\n stats.get('item_scraped_count', 0), 'item_dropped_count': stats\n .get('item_dropped_count', 0), 'start_time': start_time.\n strftime(self.DATETIME_FORMAT), 'finish_time': finish_time.\n strftime(self.DATETIME_FORMAT), 'duration': duration,\n 'finish_reason': stats.get('finish_reason'), 'log_error_count':\n stats.get('log_count/ERROR', 0)}\n logger.info(item)\n self.collection_item(item)\n\n def collection_item(self, item):\n \"\"\"处理收集到的数据,以json 形式提交\"\"\"\n res = requests.post(self.stats_collection_url, json=item)\n logger.info(res.text)\n",
"<import token>\n\n\nclass StatsCollectorExtension(object):\n <docstring token>\n DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'\n\n def __init__(self, crawler, log_file=None, stats_collection_url=None):\n self.stats_collection_url = stats_collection_url\n self.log_file = log_file\n crawler.signals.connect(self.spider_closed, signal=signals.\n spider_closed)\n\n @classmethod\n def from_crawler(cls, crawler):\n log_file = crawler.settings.get('LOG_FILE')\n stats_collection_url = crawler.settings.get('STATS_COLLECTION_URL')\n if stats_collection_url is None:\n raise Exception('STATS_COLLECTION_URL not in settings')\n return cls(crawler, log_file=log_file, stats_collection_url=\n stats_collection_url)\n\n def spider_closed(self, spider, reason):\n stats = spider.crawler.stats.get_stats()\n start_time = stats.get('start_time')\n finish_time = stats.get('finish_time')\n duration = (finish_time - start_time).seconds\n result = ScrapydUtil.parse_log_file(self.log_file)\n item = {'job_id': result.get('job_id', ''), 'project': result.get(\n 'project', ''), 'spider': spider.name, 'item_scraped_count':\n stats.get('item_scraped_count', 0), 'item_dropped_count': stats\n .get('item_dropped_count', 0), 'start_time': start_time.\n strftime(self.DATETIME_FORMAT), 'finish_time': finish_time.\n strftime(self.DATETIME_FORMAT), 'duration': duration,\n 'finish_reason': stats.get('finish_reason'), 'log_error_count':\n stats.get('log_count/ERROR', 0)}\n logger.info(item)\n self.collection_item(item)\n\n def collection_item(self, item):\n \"\"\"处理收集到的数据,以json 形式提交\"\"\"\n res = requests.post(self.stats_collection_url, json=item)\n logger.info(res.text)\n",
"<import token>\n\n\nclass StatsCollectorExtension(object):\n <docstring token>\n <assignment token>\n\n def __init__(self, crawler, log_file=None, stats_collection_url=None):\n self.stats_collection_url = stats_collection_url\n self.log_file = log_file\n crawler.signals.connect(self.spider_closed, signal=signals.\n spider_closed)\n\n @classmethod\n def from_crawler(cls, crawler):\n log_file = crawler.settings.get('LOG_FILE')\n stats_collection_url = crawler.settings.get('STATS_COLLECTION_URL')\n if stats_collection_url is None:\n raise Exception('STATS_COLLECTION_URL not in settings')\n return cls(crawler, log_file=log_file, stats_collection_url=\n stats_collection_url)\n\n def spider_closed(self, spider, reason):\n stats = spider.crawler.stats.get_stats()\n start_time = stats.get('start_time')\n finish_time = stats.get('finish_time')\n duration = (finish_time - start_time).seconds\n result = ScrapydUtil.parse_log_file(self.log_file)\n item = {'job_id': result.get('job_id', ''), 'project': result.get(\n 'project', ''), 'spider': spider.name, 'item_scraped_count':\n stats.get('item_scraped_count', 0), 'item_dropped_count': stats\n .get('item_dropped_count', 0), 'start_time': start_time.\n strftime(self.DATETIME_FORMAT), 'finish_time': finish_time.\n strftime(self.DATETIME_FORMAT), 'duration': duration,\n 'finish_reason': stats.get('finish_reason'), 'log_error_count':\n stats.get('log_count/ERROR', 0)}\n logger.info(item)\n self.collection_item(item)\n\n def collection_item(self, item):\n \"\"\"处理收集到的数据,以json 形式提交\"\"\"\n res = requests.post(self.stats_collection_url, json=item)\n logger.info(res.text)\n",
"<import token>\n\n\nclass StatsCollectorExtension(object):\n <docstring token>\n <assignment token>\n\n def __init__(self, crawler, log_file=None, stats_collection_url=None):\n self.stats_collection_url = stats_collection_url\n self.log_file = log_file\n crawler.signals.connect(self.spider_closed, signal=signals.\n spider_closed)\n\n @classmethod\n def from_crawler(cls, crawler):\n log_file = crawler.settings.get('LOG_FILE')\n stats_collection_url = crawler.settings.get('STATS_COLLECTION_URL')\n if stats_collection_url is None:\n raise Exception('STATS_COLLECTION_URL not in settings')\n return cls(crawler, log_file=log_file, stats_collection_url=\n stats_collection_url)\n\n def spider_closed(self, spider, reason):\n stats = spider.crawler.stats.get_stats()\n start_time = stats.get('start_time')\n finish_time = stats.get('finish_time')\n duration = (finish_time - start_time).seconds\n result = ScrapydUtil.parse_log_file(self.log_file)\n item = {'job_id': result.get('job_id', ''), 'project': result.get(\n 'project', ''), 'spider': spider.name, 'item_scraped_count':\n stats.get('item_scraped_count', 0), 'item_dropped_count': stats\n .get('item_dropped_count', 0), 'start_time': start_time.\n strftime(self.DATETIME_FORMAT), 'finish_time': finish_time.\n strftime(self.DATETIME_FORMAT), 'duration': duration,\n 'finish_reason': stats.get('finish_reason'), 'log_error_count':\n stats.get('log_count/ERROR', 0)}\n logger.info(item)\n self.collection_item(item)\n <function token>\n",
"<import token>\n\n\nclass StatsCollectorExtension(object):\n <docstring token>\n <assignment token>\n\n def __init__(self, crawler, log_file=None, stats_collection_url=None):\n self.stats_collection_url = stats_collection_url\n self.log_file = log_file\n crawler.signals.connect(self.spider_closed, signal=signals.\n spider_closed)\n\n @classmethod\n def from_crawler(cls, crawler):\n log_file = crawler.settings.get('LOG_FILE')\n stats_collection_url = crawler.settings.get('STATS_COLLECTION_URL')\n if stats_collection_url is None:\n raise Exception('STATS_COLLECTION_URL not in settings')\n return cls(crawler, log_file=log_file, stats_collection_url=\n stats_collection_url)\n <function token>\n <function token>\n",
"<import token>\n\n\nclass StatsCollectorExtension(object):\n <docstring token>\n <assignment token>\n\n def __init__(self, crawler, log_file=None, stats_collection_url=None):\n self.stats_collection_url = stats_collection_url\n self.log_file = log_file\n crawler.signals.connect(self.spider_closed, signal=signals.\n spider_closed)\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\nclass StatsCollectorExtension(object):\n <docstring token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
98,692 |
72101d770a05a7e29c24421aa6f75ac27c02d597
|
from selenium.webdriver.common.by import By
from features.lib.pages.BasePage import BasePage
class CustomerShopLocator(BasePage):
def __init__(self, context):
BasePage.__init__(
self,
context.driver,
base_url='https://www.somewebsite.com')
locator_dictionary = {
"someElementName": (By.CSS_SELECTOR, 'some_css_selector')
}
|
[
"from selenium.webdriver.common.by import By\n\nfrom features.lib.pages.BasePage import BasePage\n\n\nclass CustomerShopLocator(BasePage):\n\n def __init__(self, context):\n\n BasePage.__init__(\n self,\n context.driver,\n base_url='https://www.somewebsite.com')\n\n locator_dictionary = {\n \"someElementName\": (By.CSS_SELECTOR, 'some_css_selector')\n }",
"from selenium.webdriver.common.by import By\nfrom features.lib.pages.BasePage import BasePage\n\n\nclass CustomerShopLocator(BasePage):\n\n def __init__(self, context):\n BasePage.__init__(self, context.driver, base_url=\n 'https://www.somewebsite.com')\n locator_dictionary = {'someElementName': (By.CSS_SELECTOR,\n 'some_css_selector')}\n",
"<import token>\n\n\nclass CustomerShopLocator(BasePage):\n\n def __init__(self, context):\n BasePage.__init__(self, context.driver, base_url=\n 'https://www.somewebsite.com')\n locator_dictionary = {'someElementName': (By.CSS_SELECTOR,\n 'some_css_selector')}\n",
"<import token>\n\n\nclass CustomerShopLocator(BasePage):\n\n def __init__(self, context):\n BasePage.__init__(self, context.driver, base_url=\n 'https://www.somewebsite.com')\n <assignment token>\n",
"<import token>\n\n\nclass CustomerShopLocator(BasePage):\n <function token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
98,693 |
00644387e1193de60582ab41c1d9106c82e0a166
|
# hw3: find the divisors of a number & classify numbers
num = int(input("Enter a number: "))
list_1 = []
for i in range(1, num):
    if num % i == 0:
        list_1.append(i)
print(f"Divisors of {num}: {list_1[0:]}")
|
[
"#hw3 숫자의 약수 구하기 & 숫자 분류하기\r\n\r\nnum = int(input(\"숫자를 입력하세요 : \"))\r\n\r\nlist_1=[]\r\nfor i in range(1,num):\r\n if num%i==0 :\r\n list_1.append(i)\r\nprint(f\"{num}의 약수 :{list_1[0:]}\")\r\n",
"num = int(input('숫자를 입력하세요 : '))\nlist_1 = []\nfor i in range(1, num):\n if num % i == 0:\n list_1.append(i)\nprint(f'{num}의 약수 :{list_1[0:]}')\n",
"<assignment token>\nfor i in range(1, num):\n if num % i == 0:\n list_1.append(i)\nprint(f'{num}의 약수 :{list_1[0:]}')\n",
"<assignment token>\n<code token>\n"
] | false |
98,694 |
56308396d59c8954998b6648c38302827cdcca70
|
# Baekjoon sorting problem set
# https://www.acmicpc.net/step/9
# Problem 2750: sort numbers
def sort_2750():
n = int(input())
nums = [int(input()) for _ in range(n)]
nums.sort()
for num in nums: print(num)
# Problem 2751: sort numbers 2
# reading with input() is too slow (time limit exceeded), so use sys.stdin.readline
import sys
def sort_2751():
n = int(input())
nums = [int(sys.stdin.readline()) for _ in range(n)]
nums.sort()
for num in nums:
print(num)
# Problem 10989: sort numbers 3
import sys
def sort_10989():
n = int(input())
nums = [0] * 10001
for i in range(n):
tmp = int(sys.stdin.readline())
nums[tmp] += 1
for i, cnt in enumerate(nums):
if cnt != 0:
for _ in range(cnt):
print(i)
# Problem 2108: statistics
import math
import sys
from collections import Counter
def sort_2108():
    n = int(sys.stdin.readline()) # number of values
nums = [int(sys.stdin.readline()) for _ in range(n)]
nums.sort()
    avg = round(sum(nums)/n) #1. arithmetic mean
    center = nums[n//2] #2. median
    cha = nums[-1] - nums[0] #4. range
    #3. how to find the mode
    # freq = {}
    # for n in nums:
    # if n in freq: continue
    # freq[n] = nums.count(n)
    # s_freq = sorted(freq.items(), key = lambda x: (-x[1], x[0]))
    # freq_num = s_freq[0][0] # mode
    # if len(s_freq)!=1 and s_freq[0][1] == s_freq[1][1]: # if another value has the same count
    # freq_num = s_freq[1][0]
freqs = Counter(nums).most_common()
print(freqs)
freq = freqs[0][0]
if len(freqs) != 1 and freqs[0][1] == freqs[1][1]:
freq = freqs[1][0]
print(avg, center, freq, cha, sep='\n')
# Problem 1427: Sort Inside
import sys
def sort_1427():
n = sys.stdin.readline().rstrip()
sorted_n = sorted(list(n), reverse=True)
print(''.join(sorted_n))
# Problem 11650: sort coordinates
def sort_11650():
    n = int(sys.stdin.readline()) # number of points
dots = []
for _ in range(n):
dots.append(list(map(int, sys.stdin.readline().split())))
s_dots = sorted(dots, key = lambda x : (x[0], x[1]))
for x,y in s_dots:
print(x,y)
# Problem 11651: sort coordinates 2
import sys
def sort_11651():
n = int(sys.stdin.readline())
dots = []
for _ in range(n):
dots.append(list(map(int, sys.stdin.readline().split())))
s_dots = sorted(dots, key = lambda x : (x[1], x[0]))
for x,y in s_dots:
print(x, y)
# Problem 1181: sort words
def sort_1181():
n = int(input())
words = [0] * n
for i in range(n):
words[i] = sys.stdin.readline().rstrip()
s_words = sorted(set(words), key=lambda x:(len(x),x))
for w in s_words:
print(w)
# Problem 10814: sort by age
# comparing the ages as raw strings seems to give a wrong answer...
import sys
def sort_10814():
n = int(sys.stdin.readline())
people = []
for _ in range(n):
age, name = sys.stdin.readline().rstrip().split()
people.append([int(age), name])
people.sort(key = lambda x : x[0])
for x, y in people:
print(x, y)
|
[
"# 백준 정렬 단계\n# https://www.acmicpc.net/step/9\n\n\n# 2750 번 : 수 정렬하기\ndef sort_2750():\n n = int(input())\n nums = [int(input()) for _ in range(n)]\n\n nums.sort()\n for num in nums: print(num)\n\n\n# 2751 번 : 수 정렬하기 2\n# sort쓰면 시간초과\nimport sys\ndef sort_2751():\n n = int(input())\n nums = [int(sys.stdin.readline()) for _ in range(n)]\n\n nums.sort()\n for num in nums: \n print(num)\n\n\n# 10989 번 : 수 정렬하기3\nimport sys\ndef sort_10989():\n n = int(input())\n nums = [0] * 10001\n\n for i in range(n):\n tmp = int(sys.stdin.readline())\n nums[tmp] += 1\n\n for i, cnt in enumerate(nums):\n if cnt != 0:\n for _ in range(cnt):\n print(i)\n\n\n# 2108 번 : 통계학\nimport math\nimport sys\nfrom collections import Counter\ndef sort_2108():\n n = int(sys.stdin.readline()) #수의 갯수\n nums = [int(sys.stdin.readline()) for _ in range(n)]\n nums.sort()\n avg = round(sum(nums)/n) #1. 산술평균\n center = nums[n//2] #2. 중앙값\n cha = nums[-1] - nums[0] #4. 범위\n #3. 최빈값 구하는 식\n # freq = {}\n # for n in nums:\n # if n in freq: continue\n # freq[n] = nums.count(n)\n # s_freq = sorted(freq.items(), key = lambda x: (-x[1], x[0]))\n # freq_num = s_freq[0][0] #최빈값\n # if len(s_freq)!=1 and s_freq[0][1] == s_freq[1][1]: #갯수가 같은게 있다면\n # freq_num = s_freq[1][0]\n freqs = Counter(nums).most_common()\n print(freqs)\n freq = freqs[0][0]\n if len(freqs) != 1 and freqs[0][1] == freqs[1][1]:\n freq = freqs[1][0]\n print(avg, center, freq, cha, sep='\\n')\n\n\n\n# 1427 번 : 소트인사이드\nimport sys\ndef sort_1427():\n n = sys.stdin.readline().rstrip()\n sorted_n = sorted(list(n), reverse=True)\n print(''.join(sorted_n))\n\n\n# 11650 번 : 좌표 정렬하기\ndef sort_11650():\n n = int(sys.stdin.readline()) #좌표의 갯수\n dots = []\n for _ in range(n):\n dots.append(list(map(int, sys.stdin.readline().split())))\n s_dots = sorted(dots, key = lambda x : (x[0], x[1]))\n for x,y in s_dots:\n print(x,y)\n\n\n# 11651 번 : 좌표 정렬하기2\nimport sys\ndef sort_11651():\n n = int(sys.stdin.readline())\n dots = []\n for _ in range(n):\n dots.append(list(map(int, sys.stdin.readline().split())))\n s_dots = sorted(dots, key = lambda x : (x[1], x[0]))\n for x,y in s_dots:\n print(x, y)\n\n\n# 1181 번 : 단어 정렬\ndef sort_1181():\n n = int(input())\n words = [0] * n\n for i in range(n):\n words[i] = sys.stdin.readline().rstrip()\n \n s_words = sorted(set(words), key=lambda x:(len(x),x))\n for w in s_words:\n print(w)\n\n\n# 10814 번 : 나이순 정렬\n# 나이를 str 그대로 비교하면 틀리는것같다...\nimport sys\ndef sort_10814():\n n = int(sys.stdin.readline())\n people = []\n for _ in range(n):\n age, name = sys.stdin.readline().rstrip().split()\n people.append([int(age), name])\n people.sort(key = lambda x : x[0])\n for x, y in people:\n print(x, y)",
"def sort_2750():\n n = int(input())\n nums = [int(input()) for _ in range(n)]\n nums.sort()\n for num in nums:\n print(num)\n\n\nimport sys\n\n\ndef sort_2751():\n n = int(input())\n nums = [int(sys.stdin.readline()) for _ in range(n)]\n nums.sort()\n for num in nums:\n print(num)\n\n\nimport sys\n\n\ndef sort_10989():\n n = int(input())\n nums = [0] * 10001\n for i in range(n):\n tmp = int(sys.stdin.readline())\n nums[tmp] += 1\n for i, cnt in enumerate(nums):\n if cnt != 0:\n for _ in range(cnt):\n print(i)\n\n\nimport math\nimport sys\nfrom collections import Counter\n\n\ndef sort_2108():\n n = int(sys.stdin.readline())\n nums = [int(sys.stdin.readline()) for _ in range(n)]\n nums.sort()\n avg = round(sum(nums) / n)\n center = nums[n // 2]\n cha = nums[-1] - nums[0]\n freqs = Counter(nums).most_common()\n print(freqs)\n freq = freqs[0][0]\n if len(freqs) != 1 and freqs[0][1] == freqs[1][1]:\n freq = freqs[1][0]\n print(avg, center, freq, cha, sep='\\n')\n\n\nimport sys\n\n\ndef sort_1427():\n n = sys.stdin.readline().rstrip()\n sorted_n = sorted(list(n), reverse=True)\n print(''.join(sorted_n))\n\n\ndef sort_11650():\n n = int(sys.stdin.readline())\n dots = []\n for _ in range(n):\n dots.append(list(map(int, sys.stdin.readline().split())))\n s_dots = sorted(dots, key=lambda x: (x[0], x[1]))\n for x, y in s_dots:\n print(x, y)\n\n\nimport sys\n\n\ndef sort_11651():\n n = int(sys.stdin.readline())\n dots = []\n for _ in range(n):\n dots.append(list(map(int, sys.stdin.readline().split())))\n s_dots = sorted(dots, key=lambda x: (x[1], x[0]))\n for x, y in s_dots:\n print(x, y)\n\n\ndef sort_1181():\n n = int(input())\n words = [0] * n\n for i in range(n):\n words[i] = sys.stdin.readline().rstrip()\n s_words = sorted(set(words), key=lambda x: (len(x), x))\n for w in s_words:\n print(w)\n\n\nimport sys\n\n\ndef sort_10814():\n n = int(sys.stdin.readline())\n people = []\n for _ in range(n):\n age, name = sys.stdin.readline().rstrip().split()\n people.append([int(age), name])\n people.sort(key=lambda x: x[0])\n for x, y in people:\n print(x, y)\n",
"def sort_2750():\n n = int(input())\n nums = [int(input()) for _ in range(n)]\n nums.sort()\n for num in nums:\n print(num)\n\n\n<import token>\n\n\ndef sort_2751():\n n = int(input())\n nums = [int(sys.stdin.readline()) for _ in range(n)]\n nums.sort()\n for num in nums:\n print(num)\n\n\n<import token>\n\n\ndef sort_10989():\n n = int(input())\n nums = [0] * 10001\n for i in range(n):\n tmp = int(sys.stdin.readline())\n nums[tmp] += 1\n for i, cnt in enumerate(nums):\n if cnt != 0:\n for _ in range(cnt):\n print(i)\n\n\n<import token>\n\n\ndef sort_2108():\n n = int(sys.stdin.readline())\n nums = [int(sys.stdin.readline()) for _ in range(n)]\n nums.sort()\n avg = round(sum(nums) / n)\n center = nums[n // 2]\n cha = nums[-1] - nums[0]\n freqs = Counter(nums).most_common()\n print(freqs)\n freq = freqs[0][0]\n if len(freqs) != 1 and freqs[0][1] == freqs[1][1]:\n freq = freqs[1][0]\n print(avg, center, freq, cha, sep='\\n')\n\n\n<import token>\n\n\ndef sort_1427():\n n = sys.stdin.readline().rstrip()\n sorted_n = sorted(list(n), reverse=True)\n print(''.join(sorted_n))\n\n\ndef sort_11650():\n n = int(sys.stdin.readline())\n dots = []\n for _ in range(n):\n dots.append(list(map(int, sys.stdin.readline().split())))\n s_dots = sorted(dots, key=lambda x: (x[0], x[1]))\n for x, y in s_dots:\n print(x, y)\n\n\n<import token>\n\n\ndef sort_11651():\n n = int(sys.stdin.readline())\n dots = []\n for _ in range(n):\n dots.append(list(map(int, sys.stdin.readline().split())))\n s_dots = sorted(dots, key=lambda x: (x[1], x[0]))\n for x, y in s_dots:\n print(x, y)\n\n\ndef sort_1181():\n n = int(input())\n words = [0] * n\n for i in range(n):\n words[i] = sys.stdin.readline().rstrip()\n s_words = sorted(set(words), key=lambda x: (len(x), x))\n for w in s_words:\n print(w)\n\n\n<import token>\n\n\ndef sort_10814():\n n = int(sys.stdin.readline())\n people = []\n for _ in range(n):\n age, name = sys.stdin.readline().rstrip().split()\n people.append([int(age), name])\n people.sort(key=lambda x: x[0])\n for x, y in people:\n print(x, y)\n",
"<function token>\n<import token>\n\n\ndef sort_2751():\n n = int(input())\n nums = [int(sys.stdin.readline()) for _ in range(n)]\n nums.sort()\n for num in nums:\n print(num)\n\n\n<import token>\n\n\ndef sort_10989():\n n = int(input())\n nums = [0] * 10001\n for i in range(n):\n tmp = int(sys.stdin.readline())\n nums[tmp] += 1\n for i, cnt in enumerate(nums):\n if cnt != 0:\n for _ in range(cnt):\n print(i)\n\n\n<import token>\n\n\ndef sort_2108():\n n = int(sys.stdin.readline())\n nums = [int(sys.stdin.readline()) for _ in range(n)]\n nums.sort()\n avg = round(sum(nums) / n)\n center = nums[n // 2]\n cha = nums[-1] - nums[0]\n freqs = Counter(nums).most_common()\n print(freqs)\n freq = freqs[0][0]\n if len(freqs) != 1 and freqs[0][1] == freqs[1][1]:\n freq = freqs[1][0]\n print(avg, center, freq, cha, sep='\\n')\n\n\n<import token>\n\n\ndef sort_1427():\n n = sys.stdin.readline().rstrip()\n sorted_n = sorted(list(n), reverse=True)\n print(''.join(sorted_n))\n\n\ndef sort_11650():\n n = int(sys.stdin.readline())\n dots = []\n for _ in range(n):\n dots.append(list(map(int, sys.stdin.readline().split())))\n s_dots = sorted(dots, key=lambda x: (x[0], x[1]))\n for x, y in s_dots:\n print(x, y)\n\n\n<import token>\n\n\ndef sort_11651():\n n = int(sys.stdin.readline())\n dots = []\n for _ in range(n):\n dots.append(list(map(int, sys.stdin.readline().split())))\n s_dots = sorted(dots, key=lambda x: (x[1], x[0]))\n for x, y in s_dots:\n print(x, y)\n\n\ndef sort_1181():\n n = int(input())\n words = [0] * n\n for i in range(n):\n words[i] = sys.stdin.readline().rstrip()\n s_words = sorted(set(words), key=lambda x: (len(x), x))\n for w in s_words:\n print(w)\n\n\n<import token>\n\n\ndef sort_10814():\n n = int(sys.stdin.readline())\n people = []\n for _ in range(n):\n age, name = sys.stdin.readline().rstrip().split()\n people.append([int(age), name])\n people.sort(key=lambda x: x[0])\n for x, y in people:\n print(x, y)\n",
"<function token>\n<import token>\n\n\ndef sort_2751():\n n = int(input())\n nums = [int(sys.stdin.readline()) for _ in range(n)]\n nums.sort()\n for num in nums:\n print(num)\n\n\n<import token>\n\n\ndef sort_10989():\n n = int(input())\n nums = [0] * 10001\n for i in range(n):\n tmp = int(sys.stdin.readline())\n nums[tmp] += 1\n for i, cnt in enumerate(nums):\n if cnt != 0:\n for _ in range(cnt):\n print(i)\n\n\n<import token>\n\n\ndef sort_2108():\n n = int(sys.stdin.readline())\n nums = [int(sys.stdin.readline()) for _ in range(n)]\n nums.sort()\n avg = round(sum(nums) / n)\n center = nums[n // 2]\n cha = nums[-1] - nums[0]\n freqs = Counter(nums).most_common()\n print(freqs)\n freq = freqs[0][0]\n if len(freqs) != 1 and freqs[0][1] == freqs[1][1]:\n freq = freqs[1][0]\n print(avg, center, freq, cha, sep='\\n')\n\n\n<import token>\n<function token>\n\n\ndef sort_11650():\n n = int(sys.stdin.readline())\n dots = []\n for _ in range(n):\n dots.append(list(map(int, sys.stdin.readline().split())))\n s_dots = sorted(dots, key=lambda x: (x[0], x[1]))\n for x, y in s_dots:\n print(x, y)\n\n\n<import token>\n\n\ndef sort_11651():\n n = int(sys.stdin.readline())\n dots = []\n for _ in range(n):\n dots.append(list(map(int, sys.stdin.readline().split())))\n s_dots = sorted(dots, key=lambda x: (x[1], x[0]))\n for x, y in s_dots:\n print(x, y)\n\n\ndef sort_1181():\n n = int(input())\n words = [0] * n\n for i in range(n):\n words[i] = sys.stdin.readline().rstrip()\n s_words = sorted(set(words), key=lambda x: (len(x), x))\n for w in s_words:\n print(w)\n\n\n<import token>\n\n\ndef sort_10814():\n n = int(sys.stdin.readline())\n people = []\n for _ in range(n):\n age, name = sys.stdin.readline().rstrip().split()\n people.append([int(age), name])\n people.sort(key=lambda x: x[0])\n for x, y in people:\n print(x, y)\n",
"<function token>\n<import token>\n\n\ndef sort_2751():\n n = int(input())\n nums = [int(sys.stdin.readline()) for _ in range(n)]\n nums.sort()\n for num in nums:\n print(num)\n\n\n<import token>\n\n\ndef sort_10989():\n n = int(input())\n nums = [0] * 10001\n for i in range(n):\n tmp = int(sys.stdin.readline())\n nums[tmp] += 1\n for i, cnt in enumerate(nums):\n if cnt != 0:\n for _ in range(cnt):\n print(i)\n\n\n<import token>\n\n\ndef sort_2108():\n n = int(sys.stdin.readline())\n nums = [int(sys.stdin.readline()) for _ in range(n)]\n nums.sort()\n avg = round(sum(nums) / n)\n center = nums[n // 2]\n cha = nums[-1] - nums[0]\n freqs = Counter(nums).most_common()\n print(freqs)\n freq = freqs[0][0]\n if len(freqs) != 1 and freqs[0][1] == freqs[1][1]:\n freq = freqs[1][0]\n print(avg, center, freq, cha, sep='\\n')\n\n\n<import token>\n<function token>\n\n\ndef sort_11650():\n n = int(sys.stdin.readline())\n dots = []\n for _ in range(n):\n dots.append(list(map(int, sys.stdin.readline().split())))\n s_dots = sorted(dots, key=lambda x: (x[0], x[1]))\n for x, y in s_dots:\n print(x, y)\n\n\n<import token>\n\n\ndef sort_11651():\n n = int(sys.stdin.readline())\n dots = []\n for _ in range(n):\n dots.append(list(map(int, sys.stdin.readline().split())))\n s_dots = sorted(dots, key=lambda x: (x[1], x[0]))\n for x, y in s_dots:\n print(x, y)\n\n\ndef sort_1181():\n n = int(input())\n words = [0] * n\n for i in range(n):\n words[i] = sys.stdin.readline().rstrip()\n s_words = sorted(set(words), key=lambda x: (len(x), x))\n for w in s_words:\n print(w)\n\n\n<import token>\n<function token>\n",
"<function token>\n<import token>\n\n\ndef sort_2751():\n n = int(input())\n nums = [int(sys.stdin.readline()) for _ in range(n)]\n nums.sort()\n for num in nums:\n print(num)\n\n\n<import token>\n\n\ndef sort_10989():\n n = int(input())\n nums = [0] * 10001\n for i in range(n):\n tmp = int(sys.stdin.readline())\n nums[tmp] += 1\n for i, cnt in enumerate(nums):\n if cnt != 0:\n for _ in range(cnt):\n print(i)\n\n\n<import token>\n<function token>\n<import token>\n<function token>\n\n\ndef sort_11650():\n n = int(sys.stdin.readline())\n dots = []\n for _ in range(n):\n dots.append(list(map(int, sys.stdin.readline().split())))\n s_dots = sorted(dots, key=lambda x: (x[0], x[1]))\n for x, y in s_dots:\n print(x, y)\n\n\n<import token>\n\n\ndef sort_11651():\n n = int(sys.stdin.readline())\n dots = []\n for _ in range(n):\n dots.append(list(map(int, sys.stdin.readline().split())))\n s_dots = sorted(dots, key=lambda x: (x[1], x[0]))\n for x, y in s_dots:\n print(x, y)\n\n\ndef sort_1181():\n n = int(input())\n words = [0] * n\n for i in range(n):\n words[i] = sys.stdin.readline().rstrip()\n s_words = sorted(set(words), key=lambda x: (len(x), x))\n for w in s_words:\n print(w)\n\n\n<import token>\n<function token>\n",
"<function token>\n<import token>\n<function token>\n<import token>\n\n\ndef sort_10989():\n n = int(input())\n nums = [0] * 10001\n for i in range(n):\n tmp = int(sys.stdin.readline())\n nums[tmp] += 1\n for i, cnt in enumerate(nums):\n if cnt != 0:\n for _ in range(cnt):\n print(i)\n\n\n<import token>\n<function token>\n<import token>\n<function token>\n\n\ndef sort_11650():\n n = int(sys.stdin.readline())\n dots = []\n for _ in range(n):\n dots.append(list(map(int, sys.stdin.readline().split())))\n s_dots = sorted(dots, key=lambda x: (x[0], x[1]))\n for x, y in s_dots:\n print(x, y)\n\n\n<import token>\n\n\ndef sort_11651():\n n = int(sys.stdin.readline())\n dots = []\n for _ in range(n):\n dots.append(list(map(int, sys.stdin.readline().split())))\n s_dots = sorted(dots, key=lambda x: (x[1], x[0]))\n for x, y in s_dots:\n print(x, y)\n\n\ndef sort_1181():\n n = int(input())\n words = [0] * n\n for i in range(n):\n words[i] = sys.stdin.readline().rstrip()\n s_words = sorted(set(words), key=lambda x: (len(x), x))\n for w in s_words:\n print(w)\n\n\n<import token>\n<function token>\n",
"<function token>\n<import token>\n<function token>\n<import token>\n\n\ndef sort_10989():\n n = int(input())\n nums = [0] * 10001\n for i in range(n):\n tmp = int(sys.stdin.readline())\n nums[tmp] += 1\n for i, cnt in enumerate(nums):\n if cnt != 0:\n for _ in range(cnt):\n print(i)\n\n\n<import token>\n<function token>\n<import token>\n<function token>\n<function token>\n<import token>\n\n\ndef sort_11651():\n n = int(sys.stdin.readline())\n dots = []\n for _ in range(n):\n dots.append(list(map(int, sys.stdin.readline().split())))\n s_dots = sorted(dots, key=lambda x: (x[1], x[0]))\n for x, y in s_dots:\n print(x, y)\n\n\ndef sort_1181():\n n = int(input())\n words = [0] * n\n for i in range(n):\n words[i] = sys.stdin.readline().rstrip()\n s_words = sorted(set(words), key=lambda x: (len(x), x))\n for w in s_words:\n print(w)\n\n\n<import token>\n<function token>\n",
"<function token>\n<import token>\n<function token>\n<import token>\n\n\ndef sort_10989():\n n = int(input())\n nums = [0] * 10001\n for i in range(n):\n tmp = int(sys.stdin.readline())\n nums[tmp] += 1\n for i, cnt in enumerate(nums):\n if cnt != 0:\n for _ in range(cnt):\n print(i)\n\n\n<import token>\n<function token>\n<import token>\n<function token>\n<function token>\n<import token>\n<function token>\n\n\ndef sort_1181():\n n = int(input())\n words = [0] * n\n for i in range(n):\n words[i] = sys.stdin.readline().rstrip()\n s_words = sorted(set(words), key=lambda x: (len(x), x))\n for w in s_words:\n print(w)\n\n\n<import token>\n<function token>\n",
"<function token>\n<import token>\n<function token>\n<import token>\n\n\ndef sort_10989():\n n = int(input())\n nums = [0] * 10001\n for i in range(n):\n tmp = int(sys.stdin.readline())\n nums[tmp] += 1\n for i, cnt in enumerate(nums):\n if cnt != 0:\n for _ in range(cnt):\n print(i)\n\n\n<import token>\n<function token>\n<import token>\n<function token>\n<function token>\n<import token>\n<function token>\n<function token>\n<import token>\n<function token>\n",
"<function token>\n<import token>\n<function token>\n<import token>\n<function token>\n<import token>\n<function token>\n<import token>\n<function token>\n<function token>\n<import token>\n<function token>\n<function token>\n<import token>\n<function token>\n"
] | false |
98,695 |
36256d98dc83e13a553d84cae217ae2ef283446a
|
import matplotlib.pyplot as plt
plt.scatter(2, 4, s=200)
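# s=200 sets the marker area in points squared, so the single dot is large enough to see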
#Set chart title and label axes
plt.title("Square Numbers", fontsize=24)
plt.xlabel("Value", fontsize=14)
plt.ylabel("Square of Value", fontsize=14)
#Set size of tick labels
plt.tick_params(axis='both', which='major', labelsize=14)
plt.show()
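# A hedged extension of the same chart (not in the original snippet): plot the
# first five squares instead of a single point, keeping the same labels.
# x_values = [1, 2, 3, 4, 5]
# y_values = [x ** 2 for x in x_values]
# plt.scatter(x_values, y_values, s=200)
# plt.show()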
|
[
"import matplotlib.pyplot as plt\n\nplt.scatter(2, 4, s=200)\n\n#Set chart title and label axes\nplt.title(\"Square Numbers\", fontsize=24)\nplt.xlabel(\"Value\", fontsize=14)\nplt.ylabel(\"Square of Value\", fontsize=14)\n\n#Set size of tick labels\nplt.tick_params(axis='both', which='major', labelsize=14)\nplt.show()",
"import matplotlib.pyplot as plt\nplt.scatter(2, 4, s=200)\nplt.title('Square Numbers', fontsize=24)\nplt.xlabel('Value', fontsize=14)\nplt.ylabel('Square of Value', fontsize=14)\nplt.tick_params(axis='both', which='major', labelsize=14)\nplt.show()\n",
"<import token>\nplt.scatter(2, 4, s=200)\nplt.title('Square Numbers', fontsize=24)\nplt.xlabel('Value', fontsize=14)\nplt.ylabel('Square of Value', fontsize=14)\nplt.tick_params(axis='both', which='major', labelsize=14)\nplt.show()\n",
"<import token>\n<code token>\n"
] | false |
98,696 |
4817a92904438c71aabdfd3788f19cb8ab374860
|
import pdb,os,time,subprocess,zipfile
##a function to download tesscut TPF's
def qtic(ticnum,datadir = '/Volumes/UTOld/tessdata/',xsize=31,ysize=31,sector=None,dummymode=False):
import tic_query
##get the ra and dec from TIC then just run the other function
ra,dec = tic_query.tic_radec(ticnum)
dummy,dummy2 = qradec(ticnum,ra,dec,datadir=datadir,xsize=xsize,ysize=ysize,sector=sector,dummymode=dummymode)
return dummy,dummy2
def qradec(tic,ra,dec,datadir = '/Volumes/UTOld/tessdata/',xsize=31,ysize=31,sector=None,dummymode=False):
##ra and dec are either floats or strings
##sector and tic are integers, as are x/ysize
##build the command
queryurl = 'https://mast.stsci.edu/tesscut/api/v0.1/astrocut?ra='+str(ra)+'&dec='+str(dec)+'&x='+str(xsize)+'&y='+str(ysize)+'&units=px'
# pdb.set_trace()
# # https://mast.stsci.edu/tesscut/api/v0.1/astrocut?ra=136.524818800348&dec=-76.7857934853963&y=51&x=51&units=px&sector=all
if sector != None:
queryurl += '&sector='+str(sector)
else:
queryurl += '&sector=All'
command = '/usr/local/bin/wget -O ./tesscut_tmp/latest.zip "'+queryurl + '"'
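# -O writes the cutout zip to a fixed temporary path; shell=True assumes wget is installed at /usr/local/bin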
process = subprocess.Popen(command,shell=True,stdout=subprocess.PIPE)
process.wait()
#print('subprocess returncode: ' + str(process.returncode))
if process.returncode !=0:
##try the command again
process = subprocess.Popen(command,shell=True,stdout=subprocess.PIPE)
process.wait()
if process.returncode != 0:
print('bad return code, something wrong!')
errorfile = open('latest_tesscut_errorfile.txt','w')
errorfile.write(command + ' \n')
errorfile.write(str(process.returncode) + ' \n')
errorfile.flush()
errorfile.close()
pdb.set_trace()
#pdb.set_trace()
##'/usr/local/bin/wget -O ./tesscut_tmp/latest.zip "https://mast.stsci.edu/tesscut/api/v0.1/astrocut?ra=179.2008&dec=-22.4893&y=1&x=1&sector=10"'
##'/usr/local/bin/wget -O ./tesscut_tmp/latest.zip "https://mast.stsci.edu/tesscut/api/v0.1/astrocut?ra=347.45642&dec=-14.51055&y=1&x=1&sector=10"'
filesize = os.path.getsize('./tesscut_tmp/latest.zip')
if filesize < 5000:
##this is the case for a non-observed target in the input sector
return -1,-1
if dummymode == True: return 0,0 ##if dummymode=True, we are only checking whether the target was observed, so don't extract any files
##now repackage the download, basically we want one subdir for each tic number that gets updated every time if new stuff appears
##check directory exists:
datalocation = datadir+'tic'+str(tic)+'/'
if os.path.exists(datalocation)==False: os.makedirs(datalocation)
##unzip to the new data location
zip_ref = zipfile.ZipFile('./tesscut_tmp/latest.zip', 'r')
zip_ref.extractall(datalocation)
zip_ref.close()
##now write in the corresponding log
datestamp = time.strftime('%Y%m%d-%H:%M:%S', time.localtime(time.time()))
logfile = open(datalocation+'tic'+str(tic)+'_dllog.txt','a')
logfile.write(datestamp+' '+str(ra)+' '+str(dec)+' '+str(xsize)+'x'+str(ysize)+' '+str(sector)+' \n')
logfile.close()
return 0,datalocation
##uncomment to test with DS tuc
# tic = 410214986
# ra = 354.91395712
# dec = -69.19558694
# test = qradec(tic,ra,dec)
##uncomment to test an unobserved target:
# tic = 17554529
# ra = 67.15464437
# dec=19.18028285
# test = qradec(tic,ra,dec)
|
[
"import pdb,os,time,subprocess,zipfile\n\n##a function to download tesscut TPF's\n\ndef qtic(ticnum,datadir = '/Volumes/UTOld/tessdata/',xsize=31,ysize=31,sector=None,dummymode=False):\n import tic_query\n ##get the ra and dec from TIC then just run the other function\n ra,dec = tic_query.tic_radec(ticnum)\n dummy,dummy2 = qradec(ticnum,ra,dec,datadir=datadir,xsize=xsize,ysize=ysize,sector=sector,dummymode=dummymode)\n return dummy,dummy2\n \n \ndef qradec(tic,ra,dec,datadir = '/Volumes/UTOld/tessdata/',xsize=31,ysize=31,sector=None,dummymode=False):\n ##ra and dec are either floats or strings\n ##sector and tic are integeres, as as x/ysize\n \n ##build the command\n queryurl = 'https://mast.stsci.edu/tesscut/api/v0.1/astrocut?ra='+str(ra)+'&dec='+str(dec)+'&x='+str(xsize)+'&y='+str(ysize)+'&units=px'\n # pdb.set_trace() \n # # https://mast.stsci.edu/tesscut/api/v0.1/astrocut?ra=136.524818800348&dec=-76.7857934853963&y=51&x=51&units=px§or=all\n\n if sector != None: \n queryurl += '§or='+str(sector)\n else:\n queryurl += '§or=All'\n command = '/usr/local/bin/wget -O ./tesscut_tmp/latest.zip \"'+queryurl + '\"' \n \n process = subprocess.Popen(command,shell=True,stdout=subprocess.PIPE) \n process.wait()\n #print('subprocess returncode: ' + str(process.returncode))\n \n if process.returncode !=0:\n ##try the command again\n process = subprocess.Popen(command,shell=True,stdout=subprocess.PIPE) \n process.wait()\n \n if process.returncode != 0: \n print('bad return code, something wrong!')\n errorfile = open('latest_tesscut_errorfile.txt','wb')\n errorfile.write(command + ' \\n')\n errorfile.write(str(process.returncode) + ' \\n')\n errorfile.flush()\n errorfile.close()\n \n pdb.set_trace()\n \n \n \n #pdb.set_trace()\n ##'/usr/local/bin/wget -O ./tesscut_tmp/latest.zip \"https://mast.stsci.edu/tesscut/api/v0.1/astrocut?ra=179.2008&dec=-22.4893&y=1&x=1§or=10\"'\n ##'/usr/local/bin/wget -O ./tesscut_tmp/latest.zip \"https://mast.stsci.edu/tesscut/api/v0.1/astrocut?ra=347.45642&dec=-14.51055&y=1&x=1§or=10\"'\n filesize = os.path.getsize('./tesscut_tmp/latest.zip')\n if filesize < 5000:\n ##this is the case for a non-observed target in the input sector\n return -1,-1\n if dummymode == True: return 0,0 ##if in dummymode=True, just checking target observation, dont extract any files\n \n ##now repackage the download, basically we want one subdir for each tic number that gets updated every time if new stuff appears \n ##check directory exists:\n datalocation = datadir+'tic'+str(tic)+'/'\n if os.path.exists(datalocation)==False: os.makedirs(datalocation)\n \n ##unzip to the new data location\n zip_ref = zipfile.ZipFile('./tesscut_tmp/latest.zip', 'r')\n zip_ref.extractall(datalocation)\n zip_ref.close()\n ##now write in the corresponding log\n datestamp = time.strftime('%Y%m%d-%H:%M:%S', time.localtime(time.time()))\n logfile = open(datalocation+'tic'+str(tic)+'_dllog.txt','ab')\n logfile.write(datestamp+' '+str(ra)+' '+str(dec)+' '+str(xsize)+'x'+str(ysize)+' '+str(sector)+' \\n')\n logfile.close()\n\n return 0,datalocation\n \n \n \n##uncomment to test with DS tuc\n# tic = 410214986\n# ra = 354.91395712\n# dec = -69.19558694\n# test = qradec(tic,ra,dec)\n\n##uncomment to test an unobserved target:\n# tic = 17554529\n# ra = 67.15464437\n# dec=19.18028285\n# test = qradec(tic,ra,dec)",
"import pdb, os, time, subprocess, zipfile\n\n\ndef qtic(ticnum, datadir='/Volumes/UTOld/tessdata/', xsize=31, ysize=31,\n sector=None, dummymode=False):\n import tic_query\n ra, dec = tic_query.tic_radec(ticnum)\n dummy, dummy2 = qradec(ticnum, ra, dec, datadir=datadir, xsize=xsize,\n ysize=ysize, sector=sector, dummymode=dummymode)\n return dummy, dummy2\n\n\ndef qradec(tic, ra, dec, datadir='/Volumes/UTOld/tessdata/', xsize=31,\n ysize=31, sector=None, dummymode=False):\n queryurl = 'https://mast.stsci.edu/tesscut/api/v0.1/astrocut?ra=' + str(ra\n ) + '&dec=' + str(dec) + '&x=' + str(xsize) + '&y=' + str(ysize\n ) + '&units=px'\n if sector != None:\n queryurl += '§or=' + str(sector)\n else:\n queryurl += '§or=All'\n command = ('/usr/local/bin/wget -O ./tesscut_tmp/latest.zip \"' +\n queryurl + '\"')\n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n process.wait()\n if process.returncode != 0:\n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n process.wait()\n if process.returncode != 0:\n print('bad return code, something wrong!')\n errorfile = open('latest_tesscut_errorfile.txt', 'wb')\n errorfile.write(command + ' \\n')\n errorfile.write(str(process.returncode) + ' \\n')\n errorfile.flush()\n errorfile.close()\n pdb.set_trace()\n filesize = os.path.getsize('./tesscut_tmp/latest.zip')\n if filesize < 5000:\n return -1, -1\n if dummymode == True:\n return 0, 0\n datalocation = datadir + 'tic' + str(tic) + '/'\n if os.path.exists(datalocation) == False:\n os.makedirs(datalocation)\n zip_ref = zipfile.ZipFile('./tesscut_tmp/latest.zip', 'r')\n zip_ref.extractall(datalocation)\n zip_ref.close()\n datestamp = time.strftime('%Y%m%d-%H:%M:%S', time.localtime(time.time()))\n logfile = open(datalocation + 'tic' + str(tic) + '_dllog.txt', 'ab')\n logfile.write(datestamp + ' ' + str(ra) + ' ' + str(dec) + ' ' + str(\n xsize) + 'x' + str(ysize) + ' ' + str(sector) + ' \\n')\n logfile.close()\n return 0, datalocation\n",
"<import token>\n\n\ndef qtic(ticnum, datadir='/Volumes/UTOld/tessdata/', xsize=31, ysize=31,\n sector=None, dummymode=False):\n import tic_query\n ra, dec = tic_query.tic_radec(ticnum)\n dummy, dummy2 = qradec(ticnum, ra, dec, datadir=datadir, xsize=xsize,\n ysize=ysize, sector=sector, dummymode=dummymode)\n return dummy, dummy2\n\n\ndef qradec(tic, ra, dec, datadir='/Volumes/UTOld/tessdata/', xsize=31,\n ysize=31, sector=None, dummymode=False):\n queryurl = 'https://mast.stsci.edu/tesscut/api/v0.1/astrocut?ra=' + str(ra\n ) + '&dec=' + str(dec) + '&x=' + str(xsize) + '&y=' + str(ysize\n ) + '&units=px'\n if sector != None:\n queryurl += '§or=' + str(sector)\n else:\n queryurl += '§or=All'\n command = ('/usr/local/bin/wget -O ./tesscut_tmp/latest.zip \"' +\n queryurl + '\"')\n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n process.wait()\n if process.returncode != 0:\n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n process.wait()\n if process.returncode != 0:\n print('bad return code, something wrong!')\n errorfile = open('latest_tesscut_errorfile.txt', 'wb')\n errorfile.write(command + ' \\n')\n errorfile.write(str(process.returncode) + ' \\n')\n errorfile.flush()\n errorfile.close()\n pdb.set_trace()\n filesize = os.path.getsize('./tesscut_tmp/latest.zip')\n if filesize < 5000:\n return -1, -1\n if dummymode == True:\n return 0, 0\n datalocation = datadir + 'tic' + str(tic) + '/'\n if os.path.exists(datalocation) == False:\n os.makedirs(datalocation)\n zip_ref = zipfile.ZipFile('./tesscut_tmp/latest.zip', 'r')\n zip_ref.extractall(datalocation)\n zip_ref.close()\n datestamp = time.strftime('%Y%m%d-%H:%M:%S', time.localtime(time.time()))\n logfile = open(datalocation + 'tic' + str(tic) + '_dllog.txt', 'ab')\n logfile.write(datestamp + ' ' + str(ra) + ' ' + str(dec) + ' ' + str(\n xsize) + 'x' + str(ysize) + ' ' + str(sector) + ' \\n')\n logfile.close()\n return 0, datalocation\n",
"<import token>\n<function token>\n\n\ndef qradec(tic, ra, dec, datadir='/Volumes/UTOld/tessdata/', xsize=31,\n ysize=31, sector=None, dummymode=False):\n queryurl = 'https://mast.stsci.edu/tesscut/api/v0.1/astrocut?ra=' + str(ra\n ) + '&dec=' + str(dec) + '&x=' + str(xsize) + '&y=' + str(ysize\n ) + '&units=px'\n if sector != None:\n queryurl += '§or=' + str(sector)\n else:\n queryurl += '§or=All'\n command = ('/usr/local/bin/wget -O ./tesscut_tmp/latest.zip \"' +\n queryurl + '\"')\n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n process.wait()\n if process.returncode != 0:\n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n process.wait()\n if process.returncode != 0:\n print('bad return code, something wrong!')\n errorfile = open('latest_tesscut_errorfile.txt', 'wb')\n errorfile.write(command + ' \\n')\n errorfile.write(str(process.returncode) + ' \\n')\n errorfile.flush()\n errorfile.close()\n pdb.set_trace()\n filesize = os.path.getsize('./tesscut_tmp/latest.zip')\n if filesize < 5000:\n return -1, -1\n if dummymode == True:\n return 0, 0\n datalocation = datadir + 'tic' + str(tic) + '/'\n if os.path.exists(datalocation) == False:\n os.makedirs(datalocation)\n zip_ref = zipfile.ZipFile('./tesscut_tmp/latest.zip', 'r')\n zip_ref.extractall(datalocation)\n zip_ref.close()\n datestamp = time.strftime('%Y%m%d-%H:%M:%S', time.localtime(time.time()))\n logfile = open(datalocation + 'tic' + str(tic) + '_dllog.txt', 'ab')\n logfile.write(datestamp + ' ' + str(ra) + ' ' + str(dec) + ' ' + str(\n xsize) + 'x' + str(ysize) + ' ' + str(sector) + ' \\n')\n logfile.close()\n return 0, datalocation\n",
"<import token>\n<function token>\n<function token>\n"
] | false |
98,697 |
6a3cd99bb2b71278021e6e959976de2d0c4ca741
|
from aiohttp import web
import asyncio
from easydict import EasyDict as edict
from graphql import graphql
import graphql_ws
from graphql_ws.aiohttp import AiohttpConnectionContext
import json
from .graphiql_template import make_template
class GraphQLController:
def __init__(self, schema, context_builder, middleware):
self.schema = schema
self.context_builder = context_builder
self.middleware = middleware
self.subscription_server = graphql_ws.SubscriptionServer(
schema, AiohttpConnectionContext
)
self.websockets = set()
def add_routes(self, app):
routes = [
app.router.add_get("/graphiql", self.handle_root),
app.router.add_get("/graphql", self.handle_graphql),
app.router.add_post("/graphql", self.handle_graphql),
app.router.add_get("/subscriptions", self.handle_subscriptions)
]
return routes
async def shutdown(self):
if len(self.websockets) > 0:
await asyncio.wait([wsr.close() for wsr in self.websockets])
async def handle_root(self, request):
template = make_template(request.host)
return web.Response(text=template, content_type="text/html")
async def handle_subscriptions(self, request):
response = web.WebSocketResponse(protocols=(graphql_ws.WS_PROTOCOL,))
self.websockets.add(response)
await response.prepare(request)
await self.subscription_server.handle(response, self.context_builder(request))
self.websockets.remove(response)
return response
async def get_query_document(self, request, content_type):
if content_type == "application/graphql":
return {'query': await request.text()}
elif content_type in ("application/json", "text/plain"):
return await request.json()
elif request.content_type in (
"application/x-www-form-urlencoded",
"multipart/form-data",
):
return dict(await request.post())
raise Exception(f"Unhandled content type '{content_type}'")
async def handle_graphql(self, request):
query_document = await self.get_query_document(request, request.content_type)
print(query_document['query'])
result = await graphql(
self.schema,
source=query_document['query'],
variable_values=query_document.get('variables', None),
operation_name=query_document.get('operationName', None),
context_value=self.context_builder(request),
middleware=self.middleware
)
response = {'data': result.data}
if result.errors:
response["errors"] = [error.formatted for error in result.errors]
return web.json_response(response)
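# Hedged wiring sketch (not part of the original class). It assumes you already
# have a GraphQL `schema`, a `context_builder(request)` callable and a
# `middleware` list to pass in; the names below are illustrative only.
# def make_app(schema, context_builder, middleware):
#     app = web.Application()
#     controller = GraphQLController(schema, context_builder, middleware)
#     controller.add_routes(app)
#     async def on_shutdown(app):
#         await controller.shutdown()
#     app.on_shutdown.append(on_shutdown)
#     return app
# # web.run_app(make_app(schema, context_builder, middleware), port=8080)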
|
[
"from aiohttp import web\nimport asyncio\nfrom easydict import EasyDict as edict\nfrom graphql import graphql\nimport graphql_ws\nfrom graphql_ws.aiohttp import AiohttpConnectionContext\nimport json\n\nfrom .graphiql_template import make_template\n\n\nclass GraphQLController:\n\n def __init__(self, schema, context_builder, middleware):\n self.schema = schema\n self.context_builder = context_builder\n self.middleware = middleware\n\n self.subscription_server = graphql_ws.SubscriptionServer(\n schema, AiohttpConnectionContext\n )\n self.websockets = set()\n\n\n def add_routes(self, app):\n routes = [\n app.router.add_get(\"/graphiql\", self.handle_root),\n app.router.add_get(\"/graphql\", self.handle_graphql),\n app.router.add_post(\"/graphql\", self.handle_graphql),\n app.router.add_get(\"/subscriptions\", self.handle_subscriptions)\n ]\n\n return routes\n\n\n async def shutdown(self):\n if len(self.websockets) > 0:\n await asyncio.wait([wsr.close() for wsr in self.websockets])\n\n\n async def handle_root(self, request):\n template = make_template(request.host)\n return web.Response(text=template, content_type=\"text/html\")\n\n\n async def handle_subscriptions(self, request):\n response = web.WebSocketResponse(protocols=(graphql_ws.WS_PROTOCOL,))\n self.websockets.add(response)\n await response.prepare(request)\n await self.subscription_server.handle(response, self.context_builder(request))\n self.websockets.remove(response)\n return response\n\n\n async def get_query_document(self, request, content_type):\n if content_type in (\"application/graphql\"):\n return {'query': await request.text()}\n elif content_type in (\"application/json\", \"text/plain\"):\n return await request.json()\n elif request.content_type in (\n \"application/x-www-form-urlencoded\",\n \"multipart/form-data\",\n ):\n return await dict(request.post())\n\n raise Exception(f\"Unhandled content type '{content_type}'\")\n\n\n async def handle_graphql(self, request):\n\n query_document = await self.get_query_document(request, request.content_type)\n\n print(query_document['query'])\n\n result = await graphql(\n self.schema,\n source=query_document['query'],\n variable_values=query_document.get('variables', None),\n operation_name=query_document.get('operationName', None),\n context_value=self.context_builder(request),\n middleware=self.middleware\n )\n\n response = {'data': result.data}\n if result.errors:\n response[\"errors\"] = [error.formatted for error in result.errors]\n\n return web.json_response(response)\n",
"from aiohttp import web\nimport asyncio\nfrom easydict import EasyDict as edict\nfrom graphql import graphql\nimport graphql_ws\nfrom graphql_ws.aiohttp import AiohttpConnectionContext\nimport json\nfrom .graphiql_template import make_template\n\n\nclass GraphQLController:\n\n def __init__(self, schema, context_builder, middleware):\n self.schema = schema\n self.context_builder = context_builder\n self.middleware = middleware\n self.subscription_server = graphql_ws.SubscriptionServer(schema,\n AiohttpConnectionContext)\n self.websockets = set()\n\n def add_routes(self, app):\n routes = [app.router.add_get('/graphiql', self.handle_root), app.\n router.add_get('/graphql', self.handle_graphql), app.router.\n add_post('/graphql', self.handle_graphql), app.router.add_get(\n '/subscriptions', self.handle_subscriptions)]\n return routes\n\n async def shutdown(self):\n if len(self.websockets) > 0:\n await asyncio.wait([wsr.close() for wsr in self.websockets])\n\n async def handle_root(self, request):\n template = make_template(request.host)\n return web.Response(text=template, content_type='text/html')\n\n async def handle_subscriptions(self, request):\n response = web.WebSocketResponse(protocols=(graphql_ws.WS_PROTOCOL,))\n self.websockets.add(response)\n await response.prepare(request)\n await self.subscription_server.handle(response, self.\n context_builder(request))\n self.websockets.remove(response)\n return response\n\n async def get_query_document(self, request, content_type):\n if content_type in 'application/graphql':\n return {'query': await request.text()}\n elif content_type in ('application/json', 'text/plain'):\n return await request.json()\n elif request.content_type in ('application/x-www-form-urlencoded',\n 'multipart/form-data'):\n return await dict(request.post())\n raise Exception(f\"Unhandled content type '{content_type}'\")\n\n async def handle_graphql(self, request):\n query_document = await self.get_query_document(request, request.\n content_type)\n print(query_document['query'])\n result = await graphql(self.schema, source=query_document['query'],\n variable_values=query_document.get('variables', None),\n operation_name=query_document.get('operationName', None),\n context_value=self.context_builder(request), middleware=self.\n middleware)\n response = {'data': result.data}\n if result.errors:\n response['errors'] = [error.formatted for error in result.errors]\n return web.json_response(response)\n",
"<import token>\n\n\nclass GraphQLController:\n\n def __init__(self, schema, context_builder, middleware):\n self.schema = schema\n self.context_builder = context_builder\n self.middleware = middleware\n self.subscription_server = graphql_ws.SubscriptionServer(schema,\n AiohttpConnectionContext)\n self.websockets = set()\n\n def add_routes(self, app):\n routes = [app.router.add_get('/graphiql', self.handle_root), app.\n router.add_get('/graphql', self.handle_graphql), app.router.\n add_post('/graphql', self.handle_graphql), app.router.add_get(\n '/subscriptions', self.handle_subscriptions)]\n return routes\n\n async def shutdown(self):\n if len(self.websockets) > 0:\n await asyncio.wait([wsr.close() for wsr in self.websockets])\n\n async def handle_root(self, request):\n template = make_template(request.host)\n return web.Response(text=template, content_type='text/html')\n\n async def handle_subscriptions(self, request):\n response = web.WebSocketResponse(protocols=(graphql_ws.WS_PROTOCOL,))\n self.websockets.add(response)\n await response.prepare(request)\n await self.subscription_server.handle(response, self.\n context_builder(request))\n self.websockets.remove(response)\n return response\n\n async def get_query_document(self, request, content_type):\n if content_type in 'application/graphql':\n return {'query': await request.text()}\n elif content_type in ('application/json', 'text/plain'):\n return await request.json()\n elif request.content_type in ('application/x-www-form-urlencoded',\n 'multipart/form-data'):\n return await dict(request.post())\n raise Exception(f\"Unhandled content type '{content_type}'\")\n\n async def handle_graphql(self, request):\n query_document = await self.get_query_document(request, request.\n content_type)\n print(query_document['query'])\n result = await graphql(self.schema, source=query_document['query'],\n variable_values=query_document.get('variables', None),\n operation_name=query_document.get('operationName', None),\n context_value=self.context_builder(request), middleware=self.\n middleware)\n response = {'data': result.data}\n if result.errors:\n response['errors'] = [error.formatted for error in result.errors]\n return web.json_response(response)\n",
"<import token>\n\n\nclass GraphQLController:\n\n def __init__(self, schema, context_builder, middleware):\n self.schema = schema\n self.context_builder = context_builder\n self.middleware = middleware\n self.subscription_server = graphql_ws.SubscriptionServer(schema,\n AiohttpConnectionContext)\n self.websockets = set()\n <function token>\n\n async def shutdown(self):\n if len(self.websockets) > 0:\n await asyncio.wait([wsr.close() for wsr in self.websockets])\n\n async def handle_root(self, request):\n template = make_template(request.host)\n return web.Response(text=template, content_type='text/html')\n\n async def handle_subscriptions(self, request):\n response = web.WebSocketResponse(protocols=(graphql_ws.WS_PROTOCOL,))\n self.websockets.add(response)\n await response.prepare(request)\n await self.subscription_server.handle(response, self.\n context_builder(request))\n self.websockets.remove(response)\n return response\n\n async def get_query_document(self, request, content_type):\n if content_type in 'application/graphql':\n return {'query': await request.text()}\n elif content_type in ('application/json', 'text/plain'):\n return await request.json()\n elif request.content_type in ('application/x-www-form-urlencoded',\n 'multipart/form-data'):\n return await dict(request.post())\n raise Exception(f\"Unhandled content type '{content_type}'\")\n\n async def handle_graphql(self, request):\n query_document = await self.get_query_document(request, request.\n content_type)\n print(query_document['query'])\n result = await graphql(self.schema, source=query_document['query'],\n variable_values=query_document.get('variables', None),\n operation_name=query_document.get('operationName', None),\n context_value=self.context_builder(request), middleware=self.\n middleware)\n response = {'data': result.data}\n if result.errors:\n response['errors'] = [error.formatted for error in result.errors]\n return web.json_response(response)\n",
"<import token>\n\n\nclass GraphQLController:\n <function token>\n <function token>\n\n async def shutdown(self):\n if len(self.websockets) > 0:\n await asyncio.wait([wsr.close() for wsr in self.websockets])\n\n async def handle_root(self, request):\n template = make_template(request.host)\n return web.Response(text=template, content_type='text/html')\n\n async def handle_subscriptions(self, request):\n response = web.WebSocketResponse(protocols=(graphql_ws.WS_PROTOCOL,))\n self.websockets.add(response)\n await response.prepare(request)\n await self.subscription_server.handle(response, self.\n context_builder(request))\n self.websockets.remove(response)\n return response\n\n async def get_query_document(self, request, content_type):\n if content_type in 'application/graphql':\n return {'query': await request.text()}\n elif content_type in ('application/json', 'text/plain'):\n return await request.json()\n elif request.content_type in ('application/x-www-form-urlencoded',\n 'multipart/form-data'):\n return await dict(request.post())\n raise Exception(f\"Unhandled content type '{content_type}'\")\n\n async def handle_graphql(self, request):\n query_document = await self.get_query_document(request, request.\n content_type)\n print(query_document['query'])\n result = await graphql(self.schema, source=query_document['query'],\n variable_values=query_document.get('variables', None),\n operation_name=query_document.get('operationName', None),\n context_value=self.context_builder(request), middleware=self.\n middleware)\n response = {'data': result.data}\n if result.errors:\n response['errors'] = [error.formatted for error in result.errors]\n return web.json_response(response)\n",
"<import token>\n<class token>\n"
] | false |
98,698 |
10433430aeb13c0b4f37aef82740cd22eaaa0fdc
|
str=input().split('WUB')
res=[]
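# split('WUB') leaves empty strings wherever several WUBs were adjacent (or at the ends), so keep only the non-empty pieces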
for i in str:
if i!='':
res.append(i)
print(' '.join(res))
|
[
"str=input().split('WUB')\nres=[]\nfor i in str:\n if i!='':\n res.append(i)\nprint(' '.join(res))\n",
"str = input().split('WUB')\nres = []\nfor i in str:\n if i != '':\n res.append(i)\nprint(' '.join(res))\n",
"<assignment token>\nfor i in str:\n if i != '':\n res.append(i)\nprint(' '.join(res))\n",
"<assignment token>\n<code token>\n"
] | false |
98,699 |
3c99ed4d337eb1a9bc8087c23048fddf5a9eac14
|
from flask import Flask, request, render_template, redirect, flash
import os
app = Flask(__name__)
@app.route("/")
def home():
return render_template("application-form.html")
@app.route("/application", methods=['GET','POST'])
def show_form():
given_name_input = request.form.get("given-name")
surname_input = request.form.get("surname")
salary_input = request.form.get("salary")
job_input = request.form.get("job")
output_text = "Dear %s %s, thank you for applying with the new web order. You have asked for %s to be our %s. Your request will be considered in the order in which it was received." %(given_name_input, surname_input, salary_input, job_input)
return render_template("form_ack.html", submission_text=output_text)
if __name__ == "__main__":
port = int(os.environ.get("PORT", 5000))
app.run(debug=True, port=port)
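# Example request against /application with hypothetical form values:
# curl -X POST -d "given-name=Ada" -d "surname=Lovelace" -d "salary=50000" -d "job=engineer" http://localhost:5000/application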
|
[
"from flask import Flask, request, render_template, redirect, flash\nimport os\n\napp = Flask(__name__)\n\[email protected](\"/\")\ndef home():\n\treturn render_template(\"application-form.html\")\n\t\[email protected](\"/application\", methods=['GET','POST'])\ndef show_form():\n\tgiven_name_input = request.form.get(\"given-name\")\n\tsurname_input = request.form.get(\"surname\")\n\tsalary_input = request.form.get(\"salary\")\n\tjob_input = request.form.get(\"job\")\n\t\n\toutput_text = \"Dear %s %s, thank you for applying with the new web order. You have asked for %s to be our %s. Your request will be considered in the order in which it was received.\" %(given_name_input, surname_input, salary_input, job_input)\n\t\n\treturn render_template(\"form_ack.html\", submission_text=output_text)\n\t\nif __name__ == \"__main__\":\n port = int(os.environ.get(\"PORT\", 5000))\n app.run(debug=True, port=port)",
"from flask import Flask, request, render_template, redirect, flash\nimport os\napp = Flask(__name__)\n\n\[email protected]('/')\ndef home():\n return render_template('application-form.html')\n\n\[email protected]('/application', methods=['GET', 'POST'])\ndef show_form():\n given_name_input = request.form.get('given-name')\n surname_input = request.form.get('surname')\n salary_input = request.form.get('salary')\n job_input = request.form.get('job')\n output_text = (\n 'Dear %s %s, thank you for applying with the new web order. You have asked for %s to be our %s. Your request will be considered in the order in which it was received.'\n % (given_name_input, surname_input, salary_input, job_input))\n return render_template('form_ack.html', submission_text=output_text)\n\n\nif __name__ == '__main__':\n port = int(os.environ.get('PORT', 5000))\n app.run(debug=True, port=port)\n",
"<import token>\napp = Flask(__name__)\n\n\[email protected]('/')\ndef home():\n return render_template('application-form.html')\n\n\[email protected]('/application', methods=['GET', 'POST'])\ndef show_form():\n given_name_input = request.form.get('given-name')\n surname_input = request.form.get('surname')\n salary_input = request.form.get('salary')\n job_input = request.form.get('job')\n output_text = (\n 'Dear %s %s, thank you for applying with the new web order. You have asked for %s to be our %s. Your request will be considered in the order in which it was received.'\n % (given_name_input, surname_input, salary_input, job_input))\n return render_template('form_ack.html', submission_text=output_text)\n\n\nif __name__ == '__main__':\n port = int(os.environ.get('PORT', 5000))\n app.run(debug=True, port=port)\n",
"<import token>\n<assignment token>\n\n\[email protected]('/')\ndef home():\n return render_template('application-form.html')\n\n\[email protected]('/application', methods=['GET', 'POST'])\ndef show_form():\n given_name_input = request.form.get('given-name')\n surname_input = request.form.get('surname')\n salary_input = request.form.get('salary')\n job_input = request.form.get('job')\n output_text = (\n 'Dear %s %s, thank you for applying with the new web order. You have asked for %s to be our %s. Your request will be considered in the order in which it was received.'\n % (given_name_input, surname_input, salary_input, job_input))\n return render_template('form_ack.html', submission_text=output_text)\n\n\nif __name__ == '__main__':\n port = int(os.environ.get('PORT', 5000))\n app.run(debug=True, port=port)\n",
"<import token>\n<assignment token>\n\n\[email protected]('/')\ndef home():\n return render_template('application-form.html')\n\n\[email protected]('/application', methods=['GET', 'POST'])\ndef show_form():\n given_name_input = request.form.get('given-name')\n surname_input = request.form.get('surname')\n salary_input = request.form.get('salary')\n job_input = request.form.get('job')\n output_text = (\n 'Dear %s %s, thank you for applying with the new web order. You have asked for %s to be our %s. Your request will be considered in the order in which it was received.'\n % (given_name_input, surname_input, salary_input, job_input))\n return render_template('form_ack.html', submission_text=output_text)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\[email protected]('/')\ndef home():\n return render_template('application-form.html')\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<code token>\n"
] | false |