[Truncated data sample: nested array of floating-point values.]
[Truncated data sample: per-unit processor power/area report with 'Area', 'Gate Leakage', 'Peak Dynamic', 'Runtime Dynamic', 'Subthreshold Leakage', and 'Subthreshold Leakage with power gating' entries for the Execution Unit, Instruction Fetch Unit, L2, Load Store Unit, Memory Management Unit, and Renaming Unit.]
weights = array_ops.ones([array_ops.shape(embeddings)[0]], dtype=dtypes.float32) else: use_weight = True if weights.dtype != embeddings.dtype: weights = math_ops.cast(weights, embeddings.dtype) # Reshape weights to allow broadcast ones = array_ops.fill( array_ops.expand_dims(array_ops.rank(embeddings) - 1, 0), 1) bcast_weights_shape = array_ops.concat([array_ops.shape(weights), ones], 0) orig_weights_shape = weights.get_shape() weights = array_ops.reshape(weights, bcast_weights_shape) # Set the weight shape, since after reshaping to bcast_weights_shape, # the shape becomes None. if embeddings.get_shape().ndims is not None: weights.set_shape( orig_weights_shape.concatenate( [1 for _ in range(embeddings.get_shape().ndims - 1)])) embeddings *= weights segment_ids_list = [segment_ids] for i in range(len(combiners) - 1): tmp_indices = math_ops.cast(indices[:, i + 1], dtypes.int32) segment_ids = segment_ids * math_ops.cast(dense_shape[i + 1], dtypes.int32) + tmp_indices segment_ids_list.append(segment_ids) for i in range(len(combiners)): axis = len(combiners) - i if not i == 0: cur_indices = indices[:, axis] embeddings, weights, segment_ids, cur_indice_offset = \ _get_valid_embeddings(embeddings, weights, segment_ids_list[axis - 1], cur_indices, segment_ids_list[axis]) else: cur_indice_offset = indices[:, axis] segment_ids = segment_ids_list[axis - 1] embeddings, weights = _internal_combine(embeddings, segment_ids, combiners[axis - 1], weights=weights, max_size=dense_shape[axis], seg_offset=cur_indice_offset, use_weight=use_weight and (weight_axis == axis), name=name + str(axis)) return embeddings, weights @tf_export("nn.safe_embedding_lookup_sparse", v1=[]) def safe_embedding_lookup_sparse_v2(embedding_weights, sparse_ids, sparse_weights=None, combiner="mean", default_id=None, max_norm=None, name=None): """Lookup embedding results, accounting for invalid IDs and empty features. The partitioned embedding in `embedding_weights` must all be the same shape except for the first dimension. The first dimension is allowed to vary as the vocabulary size is not necessarily a multiple of `P`. `embedding_weights` may be a `PartitionedVariable` as returned by using `tf.compat.v1.get_variable()` with a partitioner. Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs with non-positive weight. For an entry with no features, the embedding vector for `default_id` is returned, or the 0-vector if `default_id` is not supplied. The ids and weights may be multi-dimensional. Embeddings are always aggregated along the last dimension. Note: when doing embedding lookup on `embedding_weights`, "div" partition strategy will be used. Support for other partition strategy will be added later. Args: embedding_weights: A list of `P` float `Tensor`s or values representing partitioned embedding `Tensor`s. Alternatively, a `PartitionedVariable` created by partitioning along dimension 0. The total unpartitioned shape should be `[e_0, e_1, ..., e_m]`, where `e_0` represents the vocab size and `e_1, ..., e_m` are the embedding dimensions. sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the ids. `d_0` is typically batch size. sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing float weights corresponding to `sparse_ids`, or `None` if all weights are be assumed to be 1.0. combiner: A string specifying how to combine embedding results for each entry. Currently "mean", "sqrtn", "tile" and "sum" are supported, with "mean" the default. 
default_id: The id to use for an entry with no features. max_norm: If not `None`, all embeddings are l2-normalized to max_norm before combining. name: A name for this operation (optional). Returns: Dense `Tensor` of shape `[d_0, d_1, ..., d_{n-1}, e_1, ..., e_m]`. Raises: ValueError: if `embedding_weights` is empty. """ return safe_embedding_lookup_sparse( embedding_weights, sparse_ids, sparse_weights=sparse_weights, combiner=combiner, default_id=default_id, name=name, partition_strategy="div", max_norm=max_norm) @tf_export(v1=["nn.safe_embedding_lookup_sparse"]) def safe_embedding_lookup_sparse(embedding_weights, sparse_ids, sparse_weights=None, combiner="mean", default_id=None, name=None, partition_strategy="div", max_norm=None, prune=True): """Lookup embedding results, accounting for invalid IDs and empty features. The partitioned embedding in `embedding_weights` must all be the same shape except for the first dimension. The first dimension is allowed to vary as the vocabulary size is not necessarily a multiple of `P`. `embedding_weights` may be a `PartitionedVariable` as returned by using `tf.compat.v1.get_variable()` with a partitioner. Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs with non-positive weight. For an entry with no features, the embedding vector for `default_id` is returned, or the 0-vector if `default_id` is not supplied. The ids and weights may be multi-dimensional. Embeddings are always aggregated along the last dimension. Args: embedding_weights: A list of `P` float `Tensor`s or values representing partitioned embedding `Tensor`s. Alternatively, a `PartitionedVariable` created by partitioning along dimension 0. The total unpartitioned shape should be `[e_0, e_1, ..., e_m]`, where `e_0` represents the vocab size and `e_1, ..., e_m` are the embedding dimensions. sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the ids. `d_0` is typically batch size. sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing float weights corresponding to `sparse_ids`, or `None` if all weights are be assumed to be 1.0. combiner: A string specifying how to combine embedding results for each entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean" the default. default_id: The id to use for an entry with no features. name: A name for this operation (optional). partition_strategy: A string specifying the partitioning strategy. Currently `"div"` and `"mod"` are supported. Default is `"div"`. max_norm: If not `None`, all embeddings are l2-normalized to max_norm before combining. Returns: Dense `Tensor` of shape `[d_0, d_1, ..., d_{n-1}, e_1, ..., e_m]`. Raises: ValueError: if `embedding_weights` is empty. """ if embedding_weights is None: raise ValueError("Missing embedding_weights %s." % embedding_weights) if isinstance(embedding_weights, variables.PartitionedVariable): embedding_weights = list(embedding_weights) # get underlying Variables. if not isinstance(embedding_weights, list): embedding_weights = [embedding_weights] if len(embedding_weights) < 1: raise ValueError("Missing embedding_weights %s." 
% embedding_weights) dtype = sparse_weights.dtype if sparse_weights is not None else None tmp_embedding_weights = [] for w in embedding_weights: from tensorflow.python.ops.hash_table import hash_table if not isinstance(w, (hash_table.DistributedHashTable, hash_table.HashTable)): if not (isinstance(w, resource_variable_ops.ResourceVariable) and dtype in (None, w.dtype)): w = ops.convert_to_tensor(w, dtype=dtype) tmp_embedding_weights.append(w) embedding_weights = tmp_embedding_weights with ops.name_scope(name, "embedding_lookup", embedding_weights + [sparse_ids, sparse_weights]) as scope: # Reshape higher-rank sparse ids and weights to linear segment ids. original_shape = sparse_ids.dense_shape original_rank_dim = tensor_shape.dimension_value( sparse_ids.dense_shape.get_shape()[0]) original_rank = ( array_ops.size(original_shape) if original_rank_dim is None else original_rank_dim) sparse_ids = sparse_ops.sparse_reshape(sparse_ids, [ math_ops.reduce_prod( array_ops.slice(original_shape, [0], [original_rank - 1])), array_ops.gather(original_shape, original_rank - 1) ]) if sparse_weights is not None: sparse_weights = sparse_tensor.SparseTensor(sparse_ids.indices, sparse_weights.values, sparse_ids.dense_shape) if prune: # Prune invalid ids and weights. sparse_ids, sparse_weights = _prune_invalid_ids(sparse_ids, sparse_weights) if combiner != "sum": sparse_ids, sparse_weights = _prune_invalid_weights( sparse_ids, sparse_weights) # Fill in dummy values for empty features, if necessary. sparse_ids, is_row_empty = sparse_ops.sparse_fill_empty_rows( sparse_ids, default_id or 0) if sparse_weights is not None: sparse_weights, _ = sparse_ops.sparse_fill_empty_rows(sparse_weights, 1.0) result = embedding_lookup_sparse( embedding_weights, sparse_ids, sparse_weights, combiner=combiner, partition_strategy=partition_strategy, name=None if default_id is None else scope, max_norm=max_norm) if default_id is None: # Broadcast is_row_empty to the same shape as embedding_lookup_result, # for use in Select. is_row_empty = array_ops.tile( array_ops.reshape(is_row_empty, [-1, 1]), array_ops.stack([1, array_ops.shape(result)[1]])) result = array_ops.where( is_row_empty, array_ops.zeros_like(result), result, name=scope) # Reshape back from linear ids back into higher-dimensional dense result. final_result = array_ops.reshape( result, array_ops.concat([ array_ops.slice( math_ops.cast(original_shape, dtypes.int32), [0], [original_rank - 1]), array_ops.slice(array_ops.shape(result), [1], [-1]) ], 0)) final_result.set_shape( tensor_shape.unknown_shape( (tensor_shape.Dimension(original_rank_dim) - 1).value).concatenate( result.get_shape()[1:])) return final_result def fused_safe_embedding_lookup_sparse(embedding_weights, sparse_ids, sparse_weights=None, combiner="mean", default_id=None, name=None, partition_strategy="div", max_norm=None, prune=True): """Functionally the same as safe_embedding_lookup_sparse but using fused embedding lookup ops in this method. """ logging.info("Is using fused embedding lookup for this scope {}".format(name)) if embedding_weights is None: raise ValueError("Missing embedding_weights %s." % embedding_weights) if isinstance(embedding_weights, variables.PartitionedVariable): embedding_weights = list(embedding_weights) # get underlying Variables. if not isinstance(embedding_weights, list): embedding_weights = [embedding_weights] if len(embedding_weights) < 1: raise ValueError("Missing embedding_weights %s." 
% embedding_weights) dtype = sparse_weights.dtype if sparse_weights is not None else None tmp_embedding_weights = [] for w in embedding_weights: from tensorflow.python.ops.hash_table import hash_table if not isinstance(w, (hash_table.DistributedHashTable, hash_table.HashTable)): if not (isinstance(w, resource_variable_ops.ResourceVariable) and dtype in (None, w.dtype)): w = ops.convert_to_tensor(w, dtype=dtype) tmp_embedding_weights.append(w) embedding_weights = tmp_embedding_weights with ops.name_scope(name, "fused_embedding_lookup", embedding_weights + [sparse_ids, sparse_weights]) as scope: # Reshape higher-rank sparse ids and weights to linear segment ids. original_shape = sparse_ids.dense_shape original_rank_dim = tensor_shape.dimension_value( sparse_ids.dense_shape.get_shape()[0]) original_rank = ( array_ops.size(original_shape) if original_rank_dim is None else original_rank_dim) sparse_ids = sparse_ops.sparse_reshape(sparse_ids, [ math_ops.reduce_prod( array_ops.slice(original_shape, [0], [original_rank - 1])), array_ops.gather(original_shape, original_rank - 1) ]) if sparse_weights is not None: sparse_weights = sparse_tensor.SparseTensor(sparse_ids.indices, sparse_weights.values, sparse_ids.dense_shape) result = fused_embedding_ops.fused_embedding_lookup_sparse( embedding_weights, sparse_ids, sparse_weights=sparse_weights, combiner=combiner, partition_strategy=partition_strategy, name=None if default_id is None else scope, max_norm=max_norm, default_id=default_id, prune_invalid_ids=True ) # Reshape back from linear ids back into higher-dimensional dense result. final_result = array_ops.reshape( result, array_ops.concat([ array_ops.slice( math_ops.cast(original_shape, dtypes.int32), [0], [original_rank - 1]), array_ops.slice(array_ops.shape(result), [1], [-1]) ], 0)) final_result.set_shape( tensor_shape.unknown_shape( (tensor_shape.Dimension(original_rank_dim) - 1).value).concatenate( result.get_shape()[1:]))
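# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal call to the public tf.nn.safe_embedding_lookup_sparse wrapper
# documented above, assuming the standard TF1-style graph-mode API; the
# variable names and toy shapes below are made up for illustration. The batch
# contains an invalid id (-1) and an empty row, so both the id-pruning and the
# empty-row/default_id paths are exercised.
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# 10-row vocabulary, 4-dimensional embeddings (toy sizes).
embedding_weights = tf.get_variable(
    "emb", shape=[10, 4], initializer=tf.truncated_normal_initializer())

# Row 0 has ids [2, 5]; row 1 holds only the invalid id -1, which is pruned;
# row 2 is empty. With default_id=None the last two rows come back as zeros.
sparse_ids = tf.SparseTensor(
    indices=[[0, 0], [0, 1], [1, 0]],
    values=tf.constant([2, 5, -1], dtype=tf.int64),
    dense_shape=[3, 2])

embedded = tf.nn.safe_embedding_lookup_sparse(
    embedding_weights, sparse_ids, sparse_weights=None,
    combiner="mean", default_id=None)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(embedded).shape)  # (3, 4)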
import csv from mongolia.constants import ID_KEY from mongolia.errors import DatabaseConflictError import re import xlrd from pytz import all_timezones from werkzeug import secure_filename from backend.admin_portal.common_helpers import raise_400_error from backend.incoming.new_user import new_user_via_admin_portal from backend.outgoing.dispatcher import (send_user_reactivation_message, send_user_pause_message, send_paused_cohort_user_restart_message) from conf.settings import DISABLE_USER_STATUS_CHANGE_MESSAGES from constants.cohorts import CohortStatus, FORBIDDEN_CUSTOM_ATTRIBUTES, ALLOWED_USER_UPLOAD_ATTRIBUTES from constants.exceptions import BadPhoneNumberError from constants.users import Status from database.backbone.cohorts import Cohort from database.tracking.users import User, Users from utils.formatters import phone_format from utils.logging import log_error from utils.time import now def change_merge_field_name_for_actions(actions, previous_attribute_name, new_attribute_name): """ This function iterates through actions and replaces any instances of merge fields of the previous attribute name with the new attribute name for set_attribute and send_message. For example, if we're editing an attribute name from "foo" to "bar", this would convert the text "Hello [[foo]]" to "Hello [[bar]]". This returns a boolean for whether or not a change has been made Conditionals and Questions are handled by getting those objects directly. """ save = False for action in actions: # for set_attribute if (action["action_name"] == "set_attribute" and action["params"]["attribute_name"] == previous_attribute_name): # change attribute name action["params"]["attribute_name"] = new_attribute_name # set flag save = True # for send_message elif action["action_name"] == "send_message": # convert text previous_text = action["params"]["text"] processed_text = change_merge_field_name_for_text(previous_text, previous_attribute_name, new_attribute_name) # if differences in text if previous_text != processed_text: # change object text action["params"]["text"] = processed_text # set save flag save = True return save def change_merge_field_name_for_text(text, previous_attribute_name, new_attribute_name): """ This function processes text and replaces any instances of merge fields of the previous attribute name with the new attribute name. For example, if we're editing an attribute name from "foo" to "bar", this would convert the text "Hello [[foo]]" to "Hello [[bar]]". """ # set up regex regex_pattern = r"\[\[[ ]?" + re.escape(previous_attribute_name) + "[ ]?\]\]" # the space is optional "[[thing]]" regex = re.compile(regex_pattern, re.IGNORECASE) # ignore case # do regex and replace with uppered new attribute name return regex.sub("[[%s]]" % new_attribute_name.upper(), text) def create_new_uploaded_attributes(custom_attributes, cohort): for attribute_name in custom_attributes: # TODO: refactor. 
Reason for the following two lines is timezone is top-level attribute, not custom attribute if attribute_name == "timezone": continue # if attribute not exist in custom attributes, create it if attribute_name and attribute_name not in cohort.get_custom_attributes(): default_value = "" cohort.add_custom_attribute(attribute_name, default_value) for user in Users(cohort_id=cohort[ID_KEY]): user.add_custom_attribute(attribute_name, default_value) return Cohort(cohort[ID_KEY]) def format_headers(headers): return [h.lower().strip() for h in headers] def parse_uploaded_user_file(user_file, cohort, option="safe", delay=True): """ :param option: "safe" or "unrestricted"; if option == "safe", use all validation elif option == "unrestricted" allow modification of data (bypass existing user check) """ curr_time = now() filename = secure_filename(user_file.filename) if filename.endswith(".csv"): # gets content from in-memory file user_file = user_file.read() # replace "/r" newline characters and filter out "" for correct csvreader parsing user_file = filter(None, user_file.replace("\r", "\n").split("\n")) user_csv_reader = csv.reader(user_file) headers = format_headers(user_csv_reader.next()) # validation header_errors = validate_headers(headers, cohort) if header_errors: return header_errors custom_attributes = headers[1:] # remove phone number errors = validate_contents(user_csv_reader, cohort[ID_KEY], custom_attributes, option) if errors: return errors # creation cohort = create_new_uploaded_attributes(custom_attributes, cohort) # create new csv.reader from file and skip headers user_csv_reader = csv.reader(user_file) user_csv_reader.next() for row in user_csv_reader: parse_user(row, cohort, custom_attributes, curr_time, delay) elif filename.endswith(".xls") or filename.endswith(".xlsx"): book = xlrd.open_workbook(file_contents=user_file.read()) sheet = book.sheet_by_index(0) headers = format_headers(sheet.row_values(0)) # validation header_errors = validate_headers(headers, cohort) if header_errors: return header_errors custom_attributes = headers[1:] # remove phone number errors = validate_contents(sheet, cohort[ID_KEY], custom_attributes, option, xls=True) if errors: return errors # creation cohort = create_new_uploaded_attributes(custom_attributes, cohort) for row_number in range(1, sheet.nrows): row = sheet.row_values(row_number) parse_user(row, cohort, custom_attributes, curr_time, delay) else: return raise_400_error("Please upload an Excel or CSV file.") def parse_user(row, cohort, custom_attributes, curr_time, delay): phonenumber = row[0] # the next two lines are necessary because excel represents phone numbers as floats if isinstance(phonenumber, float): phonenumber = str(int(phonenumber)) # check if user exists phonenumber = phone_format(str(phonenumber)) user = User.retrieve(phonenum=phonenumber, cohort_id=cohort[ID_KEY]) if not user: user = new_user_via_admin_portal(phonenumber, cohort[ID_KEY], curr_time, delay=delay) # spreadsheet index is 1-indexed for column in range(1, len(custom_attributes) + 1): # dictionary is 0-indexed custom_attribute_name = custom_attributes[column - 1] # grab value for that column and row, else set as empty string row_value = row[column] # convert floats to int if is_integer() because xlsx will convert numerics to floats # NOTE: corner case where the admin intended for the value to actually be a float and this messes with that if isinstance(row_value, float): if row_value.is_integer(): row_value = str(int(row_value)) else: row_value = str(row_value) # if 
string, strip whitespace if isinstance(row_value, basestring): # can't just do string casting since xlsx will convert ints to floats row_value = row_value.strip() custom_attribute_value = row_value if row_value != "" else "" if custom_attribute_name == "timezone": user["timezone"] = custom_attribute_value # set user attribute in customer attributes dictionary user["custom_attributes"][custom_attribute_name] = unicode(custom_attribute_value) user.save() def send_status_message(user): if user.get_cohort()["status"] == CohortStatus.active and not DISABLE_USER_STATUS_CHANGE_MESSAGES: if user["status"] == Status.active: send_user_reactivation_message(user) elif user["status"] == Status.paused: send_user_pause_message(user) elif user["status"] == Status.disabled: pass # let twilio handle it elif user.get_cohort()["status"] == CohortStatus.paused and user["status"] == Status.active: send_paused_cohort_user_restart_message(user) def update_merge_fields_in_object(object, previous_attribute_name, new_attribute_name): """ Updates attribute names for Question, Conditional, and Schedule objects given a previous_attribute_name and new_attribute_name. Will only save if changes are made. Returns True if changes are made, False if changes are not. """ # save flag to determine if database call to save is needed save = False # for objects with text if "text" in object: # change merge fields in text previous_text = object["text"] processed_text = change_merge_field_name_for_text(previous_text, previous_attribute_name, new_attribute_name) # if differences in text if previous_text != processed_text: # change object text object["text"] = processed_text # set save flag save = True # for objects with attribute if "attribute" in object: if object["attribute"] == previous_attribute_name: # change attribute name object["attribute"] = new_attribute_name # set save flag save = True # for objects with actions if "actions" in object: # change merge fields in actions actions = object["actions"] # this function will return True if changes have been made changed = change_merge_field_name_for_actions(actions, previous_attribute_name, new_attribute_name) # if differences, set save flag if changed: save = True # if save flag is True, save if save: object.save() return True else: return False def validate_contents(user_file, cohort_id, custom_attributes, option, xls=False): errors = { "bad_phonenumbers": [], "existing_users": [], "unknown": [], "phone_number_repeats": [], "timezone_errors": [], "phonenum_list": {}, # Exists to check for repeated phone number in upload } # if xls (excel) file if xls: for index in range(1, user_file.nrows): row = user_file.row_values(index) row_number = index + 1 errors = validate_row(row, row_number, cohort_id, custom_attributes, errors, option) # if csv file else: for row in user_file: row_number = user_file.line_num + 1 errors = validate_row(row, row_number, cohort_id, custom_attributes, errors, option) errors.pop("phonenum_list") # This isn't an error - just used to check for repeated phone numbers # if errors, return if any([True if value else False for key, value in errors.items()]): return errors def validate_headers(headers, cohort): header_errors = {"headers": []} if headers[0].replace(" ", "") != "phonenumber": header_errors["headers"].append("'Phone Number' is a required first column header. 
User phone numbers must be in the first column.") for column in range(1, len(headers)): if headers[column] in FORBIDDEN_CUSTOM_ATTRIBUTES and headers[column] not in ALLOWED_USER_UPLOAD_ATTRIBUTES: error = "Attribute '%s' is not an allowed custom user attribute." % headers[column].upper() header_errors["headers"].append(error) if header_errors["headers"]: return header_errors def validate_user_attribute(new_attribute_name, new_default_value, cohort, previous_attribute_name=False, previous_default_value=False): """ Validates creation or editing of user attributes. Raises a 400 error on any problem and returns True otherwise. """ # generic validation # disallow this action on completed cohorts if cohort["status"] == CohortStatus.completed: raise_400_error("This action is not allowed on a cohort that is already completed.", "error") # disallow periods in attribute name if "." in new_attribute_name: raise_400_error(("Sorry, periods '.' are not allowed in attribute names. Underscores are recommended " + "instead: '%s'") % new_attribute_name.replace(".", "_"), "error") # disallow attribute names that start with '$' if new_attribute_name[0] == "$": raise_400_error("Sorry, attribute names cannot begin with the character '$'.", "error") # disallow forbidden names for new attribute name if new_attribute_name in FORBIDDEN_CUSTOM_ATTRIBUTES: raise_400_error("The attribute '%s' already exists or is not allowed." % new_attribute_name.upper()) # validation for new attribute if not previous_attribute_name and not previous_default_value: # disallow duplicate attribute names if new_attribute_name
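# --------------------------------------------------------------------------
# Illustrative aside (not part of the original module): a minimal, standalone
# sketch of the upload flow above, i.e. read the header row, validate it, then
# walk the remaining rows, using only the stdlib csv module. Names such as
# ALLOWED_HEADERS and normalize_phone are hypothetical stand-ins for the
# app-specific helpers (validate_headers, phone_format, ...), and the sketch
# targets Python 3 even though the surrounding code is Python 2.
import csv
import io

ALLOWED_HEADERS = {"phone number", "timezone", "first_name"}  # hypothetical list

def normalize_phone(raw):
    """Keep digits only; a stand-in for the real phone_format helper."""
    return "".join(ch for ch in str(raw) if ch.isdigit())

def parse_upload_sketch(raw_bytes):
    text = raw_bytes.decode("utf-8").replace("\r", "\n")
    rows = csv.reader(io.StringIO(text))
    headers = [h.lower().strip() for h in next(rows)]
    errors = {"headers": [], "bad_phonenumbers": []}
    if headers[0].replace(" ", "") != "phonenumber":
        errors["headers"].append("'Phone Number' must be the first column.")
    for h in headers[1:]:
        if h not in ALLOWED_HEADERS:
            errors["headers"].append("Attribute '%s' is not allowed." % h)
    parsed = []
    for line_num, row in enumerate(rows, start=2):
        if not row:
            continue  # skip blank lines, as the filter(None, ...) above does
        phone = normalize_phone(row[0])
        if len(phone) < 7:
            errors["bad_phonenumbers"].append(line_num)
            continue
        parsed.append({"phone": phone, **dict(zip(headers[1:], row[1:]))})
    return parsed, {k: v for k, v in errors.items() if v}

# Example: parse_upload_sketch(b"Phone Number,timezone\n555-010-7788,US/Eastern\n")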
pressence of a risk-free assets (Maximum Sharpe Ratio) """ n = er.shape[0] # the optimizer needs an initial point init_guess = np.repeat(1/n, n) # don't want to go lower than zero and not higher than 1 (no shorting, no leverage) bounds = ((0.0, 1.0),) * n # multiplying a tuple generates multiple tuples weights_sum_to_1 = { 'type': 'eq', 'fun': lambda weights: np.sum(weights) - 1 } results = scipy.optimize.minimize(neg_sharpe_ratio, # function to maximize by negation init_guess, # starting point args=(riskfree_rate, er, cov,), # argument required by the function to minimize method="SLSQP", # quadratic programming algorithm options={'disp':False}, # don't show solver output constraints=(weights_sum_to_1), # problem constraints bounds=bounds) # bounds on the solution return results.x def neg_sharpe_ratio(weights, riskfree_rate, er, cov): """ Returns the negative of the Sharpe Ratio under given weights """ r = portfolio_return(weights, er) vol = portfolio_vol(weights, cov) return -(r-riskfree_rate)/vol def get_total_market_index_returns_1(): ind_return = get_ind_returns() ind_nfirms = get_ind_n_firms() ind_size = get_ind_size() # In this section we are going to build a market index # Compute market capitalization ind_mktcap = ind_nfirms * ind_size # Compute total market capitalization total_mktcap = ind_mktcap.sum(axis='columns') total_mktcap.plot() # Compute the capitalization weight. This calculates the participation of each industry # in the total market capitalization ind_capweight = ind_mktcap.divide(total_mktcap, axis='rows') ind_capweight[['Fin', 'Steel']].plot(figsize=(12, 6)) # Weighted average of returns, whole market total_market_return = (ind_capweight * ind_return).sum(axis='columns') return total_market_return def get_total_market_index_returns(): """ Load the 30 industry portfolio data and derive the returns of a capweighted total market index """ ind_nfirms = get_ind_n_firms() ind_size = get_ind_size() ind_return = get_ind_returns() ind_mktcap = ind_nfirms * ind_size total_mktcap = ind_mktcap.sum(axis=1) ind_capweight = ind_mktcap.divide(total_mktcap, axis="rows") total_market_return = (ind_capweight * ind_return).sum(axis="columns") return total_market_return def run_ccpi(risky_r, safe_r=None, m =3, start=1000, floor=0.8, riskfree_rate=00.03, drawdown=None): """ Run a backtest of the CPPI stategy, given a set of returns for the risky asset Returns a dictionary containing: Asset Value History, Risk Budget History, Risky Weight History :return: """ dates = risky_r.index n_steps = len(dates) floor_value = start * floor peak = start if isinstance(risky_r, pd.Series): risky_r = pd.DataFrame(risky_r, columns=['R']) if safe_r is None: safe_r = pd.DataFrame().reindex_like(risky_r) safe_r.values[:] = riskfree_rate/12 account_history = pd.DataFrame().reindex_like(risky_r) cushion_history = pd.DataFrame().reindex_like(risky_r) risky_w_history = pd.DataFrame().reindex_like(risky_r) account_value = start for step in range(0, n_steps): if drawdown is not None: peak = np.maximum(peak, account_value) floor_value = peak * (1-drawdown) cushion = (account_value - floor_value) / account_value risky_w = m * cushion risky_w = np.minimum(risky_w, 1) risky_w = np.maximum(risky_w, 0) safe_w = 1 - risky_w risky_alloc = account_value * risky_w safe_alloc = account_value * safe_w # update the account value account_value = risky_alloc * (1 + risky_r.iloc[step]) + safe_alloc * (1 + safe_r.iloc[step]) # save the values to be analyzed later cushion_history.iloc[step] = cushion risky_w_history.iloc[step] = 
risky_w account_history.iloc[step] = account_value risky_wealth = start*(1+risky_r).cumprod() backtest_result = { 'Wealth': account_history, 'Risky Wealth': risky_wealth, 'Risky Budget': cushion_history, 'Risky Allocation': risky_w_history, 'm': m, 'start': start, 'floor': floor, 'risky_r': risky_r, 'safe_r': safe_r } return backtest_result def summary_stats(r, riskfree_rate=0.03): """ Return a DataFrame that contains aggregated summary stats for the returns in the columns of r """ ann_r = r.aggregate(annualize_rets, periods_per_year=12) ann_vol = r.aggregate(annualize_vol, periods_per_year=12) ann_sr = r.aggregate(sharpe_ratio, riskfree_rate=riskfree_rate, periods_per_year=12) dd = r.aggregate(lambda r: drawdown(r).drawdown.min()) skew = r.aggregate(skewness) kurt = r.aggregate(kurtosis) cf_var5 = r.aggregate(var_gaussian, modified=True) hist_cvar5 = r.aggregate(cvar_historic) return pd.DataFrame({ "Annualized Return": ann_r, "Annualized Vol": ann_vol, "Skewness": skew, "Kurtosis": kurt, "Cornish-Fisher VaR (5%)": cf_var5, "Historic CVaR (5%)": hist_cvar5, "Sharpe Ratio": ann_sr, "Max Drawdown": dd }) def gbm0(n_years=10, n_scenarios=1000, mu=0.07, sigma=0.15, steps_per_year=12, s_0=100): """ Evolution of a Stock Price using a Geometric Brownian Motion generator :param n_years: :param n_scenarios: :param mu: :param sigma: :return: """ dt = 1/steps_per_year n_steps = int(n_years*steps_per_year) # Now it's time to generate the random part of the model xi = np.random.normal(size=(n_steps, n_scenarios)) rets = mu*dt + sigma*np.sqrt(dt)*xi rets = pd.DataFrame(rets) # Now returns to prices prices = s_0*(1+rets).cumprod() return prices def gbm(n_years=10, n_scenarios=1000, mu=0.07, sigma=0.15, steps_per_year=12, s_0=100, prices = True): """ Evolution of a Stock Price using a Geometric Brownian Motion generator. A more efficient version in the random number generation part. :param n_years: :param n_scenarios: :param mu: :param sigma: :return: """ # Derive per-step Model Parameters from User Specifications dt = 1/steps_per_year n_steps = int(n_years*steps_per_year) + 1 # the standard way ... # rets_plus_1 = np.random.normal(loc=mu*dt+1, scale=sigma*np.sqrt(dt), size=(n_steps, n_scenarios)) # without discretization error ... 
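# Added note (not in the original source): the commented-out "standard way"
# above and the (1 + mu)**dt form used just below differ in how the annualized
# drift mu is spread across the steps. With mu=0.07 and steps_per_year=12, the
# compounding form uses (1 + mu)**dt = 1.07**(1/12), roughly 1.005654 per step,
# so twelve steps compound back to exactly 1.07. The first-order form uses
# 1 + mu*dt, roughly 1.005833, which compounds to about 1.0723 over a year;
# that gap is the discretization error referred to above. The two agree in the
# limit of many steps per year.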
rets_plus_1 = np.random.normal(loc=(1+mu)**dt, scale=(sigma*np.sqrt(dt)), size=(n_steps, n_scenarios)) rets_plus_1[0] = 1 ret_val = s_0*pd.DataFrame(rets_plus_1).cumprod() if prices else rets_plus_1-1 return ret_val def discout(t, r): """ Compute the price of a pure discount bond that pays a dollar at time t, given interest rate r :param t: :param r: :return: """ return (1+r)**(-t) def pv(flows, r): """ Compute the present value of a sequence of cash flows given by the time (as an index) and amounts r can be a scalar, or a Series or DataFrame with the number of rows matching the num of rows in flows """ dates = flows.index discounts = discount(dates, r) return discounts.multiply(flows, axis='rows').sum() def funding_ratio(assets, liabilities, r): """Computes the funding ratio of some assets given liabilities and interest rates""" return assets/pv(liabilities, r) def funding_ratio(assets, liabilities, r): """ Computes the funding ratio of a series of liabilities, based on an interest rate and current value of assets """ return pv(assets, r)/pv(liabilities, r) def inst_to_ann(r): """ Converts short rate to an annualized rate :param r: :return: """ return np.expm1(r) def ann_to_inst(r): """ Convert annualized to a short rate :param r: :return: """ return np.log1p(r) import math def cir(n_years=10, n_scenarios=1, a=0.05, b=0.03, sigma=0.05, steps_per_year=12, r_0=None): """ Generate random interest rate evolution over time using the CIR model b and r_0 are assumed to be the annualized rates, not the short rate and the returned values are the annualized rates as well """ if r_0 is None: r_0 = b r_0 = ann_to_inst(r_0) dt = 1 / steps_per_year num_steps = int(n_years * steps_per_year) + 1 # because n_years might be a float shock = np.random.normal(0, scale=np.sqrt(dt), size=(num_steps, n_scenarios)) rates = np.empty_like(shock) rates[0] = r_0 ## For Price Generation h = math.sqrt(a ** 2 + 2 * sigma ** 2) prices = np.empty_like(shock) #### def price(ttm, r): _A = ((2 * h * math.exp((h + a) * ttm / 2)) / (2 * h + (h + a) * (math.exp(h * ttm) - 1))) ** ( 2 * a * b / sigma ** 2) _B = (2 * (math.exp(h * ttm) - 1)) / (2 * h + (h + a) * (math.exp(h * ttm) - 1)) _P = _A * np.exp(-_B * r) return _P prices[0] = price(n_years, r_0) #### for step in range(1, num_steps): r_t = rates[step - 1] d_r_t = a * (b - r_t) * dt + sigma * np.sqrt(r_t) * shock[step] rates[step] = abs(r_t + d_r_t) # generate prices at time t as well ... prices[step] = price(n_years - step * dt, rates[step]) rates = pd.DataFrame(data=inst_to_ann(rates), index=range(num_steps)) ### for prices prices = pd.DataFrame(data=prices, index=range(num_steps)) ### return rates, prices def bond_cash_flows(maturity, principal=100, coupon_rate=0.03, coupons_per_year=12): """ Returns the series of cash flows generated by a bond, indexed by the payment/coupon number """ n_coupons = round(maturity*coupons_per_year) coupon_amt = principal*coupon_rate/coupons_per_year coupon_times = np.arange(1, n_coupons+1) cash_flows = pd.Series(data=coupon_amt, index=coupon_times) cash_flows.iloc[-1] += principal # add the principal to the last payment return cash_flows def bond_price(maturity, principal=100, coupon_rate=0.03, coupons_per_year=12, discount_rate=0.03): """ Computes the price of a bond that pays regular coupons until maturity at which time the principal and the final coupon is returned This is not designed to be efficient, rather, it is to illustrate the underlying principle behind bond pricing! 
""" cash_flows = bond_cash_flows(maturity, principal, coupon_rate, coupons_per_year) return pv(cash_flows, discount_rate/coupons_per_year) def macaulay_duration(flows, discount_rate): """ Computes the Macaulay Duration of a sequence of cash flows, given a per-period discount rate """ discounted_flows = discount(flows.index, discount_rate)*pd.DataFrame(flows) weights = discounted_flows/discounted_flows.sum() return np.average(flows.index, weights=weights.iloc[:,0]) def discount(t, r): """ Compute the price of a pure discount bond that pays a dollar at time period t and r is the per-period interest rate returns a |t| x |r| Series or DataFrame r can be
''' ''' pass def property_overridable_library_set(self): ''' ''' pass def property_unset(self): ''' ''' pass def remove(self, draw_func): ''' ''' pass def type_recast(self): ''' ''' pass def values(self): ''' ''' pass class VIEW3D_PT_overlay_edit_mesh_measurement(bpy_types.Panel, bpy_types._GenericUI): bl_label = None ''' ''' bl_parent_id = None ''' ''' bl_region_type = None ''' ''' bl_rna = None ''' ''' bl_space_type = None ''' ''' id_data = None ''' ''' def append(self, draw_func): ''' ''' pass def as_pointer(self): ''' ''' pass def bl_rna_get_subclass(self): ''' ''' pass def bl_rna_get_subclass_py(self): ''' ''' pass def draw(self, context): ''' ''' pass def driver_add(self): ''' ''' pass def driver_remove(self): ''' ''' pass def get(self): ''' ''' pass def is_extended(self): ''' ''' pass def is_property_hidden(self): ''' ''' pass def is_property_overridable_library(self): ''' ''' pass def is_property_readonly(self): ''' ''' pass def is_property_set(self): ''' ''' pass def items(self): ''' ''' pass def keyframe_delete(self): ''' ''' pass def keyframe_insert(self): ''' ''' pass def keys(self): ''' ''' pass def path_from_id(self): ''' ''' pass def path_resolve(self): ''' ''' pass def poll(self, context): ''' ''' pass def pop(self): ''' ''' pass def prepend(self, draw_func): ''' ''' pass def property_overridable_library_set(self): ''' ''' pass def property_unset(self): ''' ''' pass def remove(self, draw_func): ''' ''' pass def type_recast(self): ''' ''' pass def values(self): ''' ''' pass class VIEW3D_PT_overlay_edit_mesh_normals(bpy_types.Panel, bpy_types._GenericUI): bl_label = None ''' ''' bl_parent_id = None ''' ''' bl_region_type = None ''' ''' bl_rna = None ''' ''' bl_space_type = None ''' ''' id_data = None ''' ''' def append(self, draw_func): ''' ''' pass def as_pointer(self): ''' ''' pass def bl_rna_get_subclass(self): ''' ''' pass def bl_rna_get_subclass_py(self): ''' ''' pass def draw(self, context): ''' ''' pass def driver_add(self): ''' ''' pass def driver_remove(self): ''' ''' pass def get(self): ''' ''' pass def is_extended(self): ''' ''' pass def is_property_hidden(self): ''' ''' pass def is_property_overridable_library(self): ''' ''' pass def is_property_readonly(self): ''' ''' pass def is_property_set(self): ''' ''' pass def items(self): ''' ''' pass def keyframe_delete(self): ''' ''' pass def keyframe_insert(self): ''' ''' pass def keys(self): ''' ''' pass def path_from_id(self): ''' ''' pass def path_resolve(self): ''' ''' pass def poll(self, context): ''' ''' pass def pop(self): ''' ''' pass def prepend(self, draw_func): ''' ''' pass def property_overridable_library_set(self): ''' ''' pass def property_unset(self): ''' ''' pass def remove(self, draw_func): ''' ''' pass def type_recast(self): ''' ''' pass def values(self): ''' ''' pass class VIEW3D_PT_overlay_edit_mesh_shading(bpy_types.Panel, bpy_types._GenericUI): bl_label = None ''' ''' bl_parent_id = None ''' ''' bl_region_type = None ''' ''' bl_rna = None ''' ''' bl_space_type = None ''' ''' id_data = None ''' ''' def append(self, draw_func): ''' ''' pass def as_pointer(self): ''' ''' pass def bl_rna_get_subclass(self): ''' ''' pass def bl_rna_get_subclass_py(self): ''' ''' pass def draw(self, context): ''' ''' pass def driver_add(self): ''' ''' pass def driver_remove(self): ''' ''' pass def get(self): ''' ''' pass def is_extended(self): ''' ''' pass def is_property_hidden(self): ''' ''' pass def is_property_overridable_library(self): ''' ''' pass def is_property_readonly(self): ''' ''' pass def 
is_property_set(self): ''' ''' pass def items(self): ''' ''' pass def keyframe_delete(self): ''' ''' pass def keyframe_insert(self): ''' ''' pass def keys(self): ''' ''' pass def path_from_id(self): ''' ''' pass def path_resolve(self): ''' ''' pass def poll(self, context): ''' ''' pass def pop(self): ''' ''' pass def prepend(self, draw_func): ''' ''' pass def property_overridable_library_set(self): ''' ''' pass def property_unset(self): ''' ''' pass def remove(self, draw_func): ''' ''' pass def type_recast(self): ''' ''' pass def values(self): ''' ''' pass class VIEW3D_PT_overlay_geometry(bpy_types.Panel, bpy_types._GenericUI): bl_label = None ''' ''' bl_parent_id = None ''' ''' bl_region_type = None ''' ''' bl_rna = None ''' ''' bl_space_type = None ''' ''' id_data = None ''' ''' def append(self, draw_func): ''' ''' pass def as_pointer(self): ''' ''' pass def bl_rna_get_subclass(self): ''' ''' pass def bl_rna_get_subclass_py(self): ''' ''' pass def draw(self, context): ''' ''' pass def driver_add(self): ''' ''' pass def driver_remove(self): ''' ''' pass def get(self): ''' ''' pass def is_extended(self): ''' ''' pass def is_property_hidden(self): ''' ''' pass def is_property_overridable_library(self): ''' ''' pass def is_property_readonly(self): ''' ''' pass def is_property_set(self): ''' ''' pass def items(self): ''' ''' pass def keyframe_delete(self): ''' ''' pass def keyframe_insert(self): ''' ''' pass def keys(self): ''' ''' pass def path_from_id(self): ''' ''' pass def path_resolve(self): ''' ''' pass def pop(self): ''' ''' pass def prepend(self, draw_func): ''' ''' pass def property_overridable_library_set(self): ''' ''' pass def property_unset(self): ''' ''' pass def remove(self, draw_func): ''' ''' pass def type_recast(self): ''' ''' pass def values(self): ''' ''' pass class VIEW3D_PT_overlay_gpencil_options(bpy_types.Panel, bpy_types._GenericUI): bl_label = None ''' ''' bl_parent_id = None ''' ''' bl_region_type = None ''' ''' bl_rna = None ''' ''' bl_space_type = None ''' ''' id_data = None ''' ''' def append(self, draw_func): ''' ''' pass def as_pointer(self): ''' ''' pass def bl_rna_get_subclass(self): ''' ''' pass def bl_rna_get_subclass_py(self): ''' ''' pass def draw(self, context): ''' ''' pass def draw_header(self, context): ''' ''' pass def driver_add(self): ''' ''' pass def driver_remove(self): ''' ''' pass def get(self): ''' ''' pass def is_extended(self): ''' ''' pass def is_property_hidden(self): ''' ''' pass def is_property_overridable_library(self): ''' ''' pass def is_property_readonly(self): ''' ''' pass def is_property_set(self): ''' ''' pass def items(self): ''' ''' pass def keyframe_delete(self): ''' ''' pass def keyframe_insert(self): ''' ''' pass def keys(self): ''' ''' pass def path_from_id(self): ''' ''' pass def path_resolve(self): ''' ''' pass def poll(self, context): ''' ''' pass def pop(self): ''' ''' pass def prepend(self, draw_func): ''' ''' pass def property_overridable_library_set(self): ''' ''' pass def property_unset(self): ''' ''' pass def remove(self, draw_func): ''' ''' pass def type_recast(self): ''' ''' pass def values(self): ''' ''' pass class VIEW3D_PT_overlay_guides(bpy_types.Panel, bpy_types._GenericUI): bl_label = None ''' ''' bl_parent_id = None ''' ''' bl_region_type = None ''' ''' bl_rna = None ''' ''' bl_space_type = None ''' ''' id_data = None ''' ''' def append(self, draw_func): ''' ''' pass def as_pointer(self): ''' ''' pass def bl_rna_get_subclass(self): ''' ''' pass def bl_rna_get_subclass_py(self): ''' ''' pass def draw(self, 
context): ''' ''' pass def driver_add(self): ''' ''' pass def driver_remove(self): ''' ''' pass def get(self): ''' ''' pass def is_extended(self): ''' ''' pass def is_property_hidden(self): ''' ''' pass def is_property_overridable_library(self): ''' ''' pass def is_property_readonly(self): ''' ''' pass def is_property_set(self): ''' ''' pass def items(self): ''' ''' pass def keyframe_delete(self): ''' ''' pass def keyframe_insert(self): ''' ''' pass def keys(self): ''' ''' pass def path_from_id(self): ''' ''' pass def path_resolve(self): ''' ''' pass def pop(self): ''' ''' pass def prepend(self, draw_func): ''' ''' pass def property_overridable_library_set(self): ''' ''' pass def property_unset(self): ''' ''' pass def remove(self, draw_func): ''' ''' pass def type_recast(self): ''' ''' pass def values(self): ''' ''' pass class VIEW3D_PT_overlay_motion_tracking(bpy_types.Panel, bpy_types._GenericUI): bl_label = None ''' ''' bl_parent_id = None ''' ''' bl_region_type = None ''' ''' bl_rna = None ''' ''' bl_space_type = None ''' ''' id_data = None ''' ''' def append(self, draw_func): ''' ''' pass def as_pointer(self): ''' ''' pass def bl_rna_get_subclass(self): ''' ''' pass def bl_rna_get_subclass_py(self): ''' ''' pass def
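# --------------------------------------------------------------------------
# Illustrative aside (not part of the generated stubs above): the classes in
# this module are auto-generated signatures of Blender's built-in 3D-viewport
# overlay panels. In a real add-on you would not re-declare them; you would
# register your own bpy.types.Panel and, if needed, extend an existing panel
# through the append()/prepend() hooks listed above. A hedged sketch, assuming
# a Blender build that ships the VIEW3D_PT_overlay_guides panel shown above;
# the category name and panel label are arbitrary examples.
import bpy

class VIEW3D_PT_my_overlay_notes(bpy.types.Panel):
    """Small custom panel in the 3D viewport sidebar."""
    bl_label = "Overlay Notes"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = "Example"

    def draw(self, context):
        layout = self.layout
        overlay = context.space_data.overlay
        layout.prop(overlay, "show_floor")
        layout.prop(overlay, "show_axis_x")

def extra_guides_rows(self, context):
    # Draw function appended to an existing panel via the _GenericUI hook.
    self.layout.label(text="Added by add-on")

def register():
    bpy.utils.register_class(VIEW3D_PT_my_overlay_notes)
    bpy.types.VIEW3D_PT_overlay_guides.append(extra_guides_rows)

def unregister():
    bpy.types.VIEW3D_PT_overlay_guides.remove(extra_guides_rows)
    bpy.utils.unregister_class(VIEW3D_PT_my_overlay_notes)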
dropout: bool, layers: int = 1, activation: str = "relu", d_ff: Optional[int] = None, d_k: Optional[int] = None, rpr_k: Optional[Union[int, List[int]]] = None, layer_norms_after: bool = False, layer_norm_eps: float = 1.0e-6, embeddings_reduction: str = 'sum', **kwargs, ): super().__init__() self.embeddings = EmbeddingsStack(embeddings, dropout, reduction=embeddings_reduction) self.weight_std = kwargs.get('weight_std', 0.02) assert self.embeddings.dsz == d_model self.transformer = TransformerEncoderStack( num_heads, d_model=d_model, pdrop=dropout, scale=True, layers=layers, activation=activation, d_ff=d_ff, rpr_k=rpr_k, d_k=d_k, layer_norms_after=layer_norms_after, layer_norm_eps=layer_norm_eps ) self.proj_to_output = pytorch_linear(d_model, 1) self.apply(self.init_layer_weights) self.lengths_feature = kwargs.get('lengths_feature', list(self.embeddings.keys())[0]) def init_layer_weights(self, module): if isinstance(module, (nn.Linear, nn.Embedding, nn.LayerNorm)): module.weight.data.normal_(mean=0.0, std=self.weight_std) if isinstance(module, (nn.Linear, nn.LayerNorm)) and module.bias is not None: module.bias.data.zero_() def forward(self, features): embedded = self.embeddings(features) x = features[self.lengths_feature] input_mask = torch.zeros(x.shape, device=x.device, dtype=torch.long).masked_fill(x != Offsets.PAD, 1).unsqueeze(1).unsqueeze(1) transformer_out = self.transformer((embedded, input_mask)) binary = self.proj_to_output(transformer_out) return torch.sigmoid(binary) def create_loss(self): return nn.BCELoss(reduction="none") class PooledSequenceCriterion(nn.Module): def __init__(self, LossFn=nn.BCEWithLogitsLoss, avg='token'): super().__init__() if avg == 'token': self.crit = LossFn() self._norm = self._no_norm else: self.crit = LossFn() self._norm = self._batch_norm def _batch_norm(self, loss, inputs): return loss / inputs.size()[0] def _no_norm(self, loss, inputs): return loss def forward(self, inputs, targets): """Evaluate some loss over a sequence. :param inputs: torch.FloatTensor, [B, C] The scores from the model. Batch First :param targets: torch.LongTensor, The labels. :returns: torch.FloatTensor, The loss. """ #inputs = inputs.transpose(0, 1) C = inputs.shape[-1] flat_targets = torch.nn.functional.one_hot(targets, C) # Get the offsets of the non-zero targets, the values of these are all on flat_targets = (torch.sum(flat_targets, axis=1) != 0).float() flat_targets[:, Offsets.PAD] = 0 flat_targets[:, Offsets.EOS] = 0 flat_targets[:, Offsets.GO] = 0 if len(inputs.shape) > 2: max_per_vocab = inputs.max(0)[0] loss = self.crit(max_per_vocab, flat_targets) else: loss = self.crit(inputs, flat_targets) return self._norm(loss, inputs) class SequenceCriterion(nn.Module): def __init__(self, LossFn=nn.NLLLoss, avg='token'): super().__init__() if avg == 'token': # self.crit = LossFn(ignore_index=Offsets.PAD, reduction='elementwise-mean') self.crit = LossFn(ignore_index=Offsets.PAD, size_average=True) self._norm = self._no_norm else: self.crit = LossFn(ignore_index=Offsets.PAD, size_average=False) self._norm = self._batch_norm def _batch_norm(self, loss, inputs): return loss / inputs.size()[0] def _no_norm(self, loss, inputs): return loss def forward(self, inputs, targets): """Evaluate some loss over a sequence. :param inputs: torch.FloatTensor, [B, .., C] The scores from the model. Batch First :param targets: torch.LongTensor, The labels. :returns: torch.FloatTensor, The loss. 
""" total_sz = targets.nelement() loss = self.crit(inputs.view(total_sz, -1), targets.view(total_sz)) return self._norm(loss, inputs) def pytorch_conv1d(in_channels, out_channels, fsz, unif=0, padding=0, initializer=None, stride=1, bias=True, groups=1): c = nn.Conv1d(in_channels, out_channels, fsz, padding=padding, stride=stride, bias=bias, groups=groups) if unif > 0: c.weight.data.uniform_(-unif, unif) elif initializer == "ortho": nn.init.orthogonal_(c.weight) if bias: nn.init.constant_(c.bias, 0) elif initializer == "he" or initializer == "kaiming": nn.init.kaiming_uniform_(c.weight) if bias: nn.init.constant_(c.bias, 0) elif initializer == "normal": nn.init.normal(mean=0, std=unif) if bias: nn.init.constant_(c.bias, 0) else: nn.init.xavier_uniform_(c.weight) if bias: nn.init.constant_(c.bias, 0) return c def tie_weight(to_layer, from_layer): """Assigns a weight object to the layer weights. This method exists to duplicate baseline functionality across packages. :param to_layer: the pytorch layer to assign weights to :param from_layer: pytorch layer to retrieve weights from """ to_layer.weight = from_layer.weight class BilinearAttention(nn.Module): def __init__(self, in_hsz: int, out_hsz: int = 1, bias_x: bool = True, bias_y: bool = True): super().__init__() self.in_hsz = in_hsz self.out_hsz = out_hsz self.bias_x = bias_x self.bias_y = bias_y a1 = in_hsz a2 = in_hsz if self.bias_x: a1 += 1 if self.bias_y: a2 += 1 self.weight = nn.Parameter(torch.Tensor(out_hsz, in_hsz + bias_x, in_hsz + bias_y)) self.reset_parameters() def reset_parameters(self): nn.init.zeros_(self.weight) #nn.init.orthogonal_(self.weight) def forward(self, x, y, mask): r""" Args: x: ``[B, T, H]``. y: ``[B, T, H]``. Returns: ~torch.Tensor: A scoring tensor of shape ``[batch_size, n_out, seq_len, seq_len]``. If ``n_out=1``, the dimension for ``n_out`` will be squeezed automatically. 
""" if self.bias_x is True: ones = torch.ones(x.shape[:-1] + (1,), device=x.device) x = torch.cat([x, ones], -1) if self.bias_y is True: ones = torch.ones(x.shape[:-1] + (1,), device=y.device) y = torch.cat([y, ones], -1) x = x.unsqueeze(1) y = y.unsqueeze(1) u = x @ self.weight s = u @ y.transpose(-2, -1) if self.out_hsz == 1: s = s.squeeze(1) s = s.masked_fill((mask.bool() == MASK_FALSE).unsqueeze(1), -1e9) return s class TripletLoss(nn.Module): """Provide a Triplet Loss using the reversed batch for negatives""" def __init__(self, model): super().__init__() self.score = nn.CosineSimilarity(dim=1) self.model = model def forward(self, inputs, targets): # reverse the batch and use as a negative example neg = targets.flip(0) query = self.model.encode_query(inputs) response = self.model.encode_response(targets) neg_response = self.model.encode_response(neg) pos_score = self.score(query, response) neg_score = self.score(query, neg_response) score = neg_score - pos_score score = score.masked_fill(score < 0.0, 0.0).sum(0) return score class ContrastiveLoss(nn.Module): def __init__(self, model, t=1.0, train_temperature=True): super().__init__() self.model = model if t is None: t = 1.0 self.t = nn.Parameter(torch.tensor(t).float(), requires_grad=train_temperature) def forward(self, inputs, targets): query = self.model.encode_query(inputs) # [B, H] response = self.model.encode_response(targets) # [B, H] query = F.normalize(query, p=2, dim=1) response = F.normalize(response, p=2, dim=1) labels = torch.arange(query.shape[0], device=query.device) logits = torch.mm(query, response.T) * self.t.exp() loss = F.cross_entropy(logits, labels) return loss class SymmetricContrastiveLoss(nn.Module): def __init__(self, model, t=1.0, train_temperature=True): super().__init__() self.model = model if t is None: t = 1.0 self.t = nn.Parameter(torch.tensor(t).float(), requires_grad=train_temperature) def forward(self, inputs, targets): query = self.model.encode_query(inputs) # [B, H] response = self.model.encode_response(targets) # [B, H] query = F.normalize(query, p=2, dim=1) response = F.normalize(response, p=2, dim=1) labels = torch.arange(query.shape[0], device=query.device) logits = torch.mm(query, response.T) * self.t.exp() loss_1 = F.cross_entropy(logits, labels) loss_2 = F.cross_entropy(logits.T, labels) loss = (loss_1 + loss_2) * 0.5 return loss class AllLoss(nn.Module): def __init__(self, model, warmup_steps=10000, reduction_type='sum'): r"""Loss from here https://arxiv.org/pdf/1705.00652.pdf see section 4 We want to minimize the negative log prob of y given x -log P(y|x) P(y|x) P(x) = P(x, y) Chain Rule of Probability P(y|x) = P(x, y) / P(x) Algebra P(y|x) = P(x, y) / \sum_\hat(y) P(x, y = \hat(y)) Marginalize over all possible ys to get the probability of x P_approx(y|x) = P(x, y) / \sum_i^k P(x, y_k) Approximate the Marginalization by just using the ys in the batch S(x, y) is the score (cosine similarity between x and y in this case) from our neural network P(x, y) = e^S(x, y) P(y|x) = e^S(x, y) / \sum_i^k e^S(x, y_k) log P(y|x) = log( e^S(x, y) / \sum_i^k e^S(x, y_k)) log P(y|x) = S(x, y) - log \sum_i^k e^S(x, y_k) -log P(y|x) = -(S(x, y) - log \sum_i^k e^S(x, y_k)) """ super().__init__() self.score = nn.CosineSimilarity(dim=-1) self.model = model self.max_scale = math.sqrt(self.model.embeddings.output_dim) self.steps = 0 self.warmup_steps = warmup_steps self.reduction = torch.mean if reduction_type == 'mean' else torch.sum def forward(self, inputs, targets): # This is the cosine distance annealing 
referred to in https://arxiv.org/pdf/1911.03688.pdf fract = min(self.steps / self.warmup_steps, 1) c = (self.max_scale-1) * fract + 1 self.steps += 1 # These will get broadcast to [B, B, H] query = self.model.encode_query(inputs).unsqueeze(1) # [B, 1, H] response = self.model.encode_response(targets).unsqueeze(0) # [1, B, H] # all_scores is now a batch x batch matrix where index (i, j) is the score between # the i^th x vector and the j^th y vector all_score = c * self.score(query, response) # [B, B] # The diagonal has the scores of correct pair, (i, i) pos_score = torch.diag(all_score) # vec_log_sum_exp will calculate the batched log_sum_exp in a numerically stable way # the result is a [B, 1] vector which we squeeze to make it [B] to match the diag # Because we are minimizing the negative log we turned the division into a subtraction here loss = pos_score - vec_log_sum_exp(all_score, -1).squeeze() # Batch loss loss = self.reduction(loss) # minimize the negative loss return -loss class CosineSimilarityLoss(nn.Module): def __init__(self, neg_value=0.3, pos_value=0.8): super().__init__() self.pos_value = pos_value self.neg_value = neg_value def forward(self, embeddings_reduction, labels): hsz = int(embeddings_reduction.shape[-1]//2) label_values = torch.zeros_like(labels, dtype=torch.float) label_values[labels == 0] = self.neg_value label_values[labels == 1] = self.pos_value output = torch.cosine_similarity(embeddings_reduction[:,:hsz], embeddings_reduction[:,hsz:]) loss = F.mse_loss(output, label_values.view(-1), reduction='mean') return loss class OnlineContrastiveLoss(nn.Module): def __init__(self): super().__init__() def forward(self, embeddings_reduction, labels): hsz = int(embeddings_reduction.shape[-1]//2) x = embeddings_reduction[:,:hsz] y = embeddings_reduction[:,hsz:] distance_matrix = 1-F.cosine_similarity(x, y) negs = distance_matrix[labels == 0] poss = distance_matrix[labels == 1] # select hard positive and hard negative pairs negative_pairs = negs[negs < (poss.max() if len(poss) > 1 else negs.mean())] positive_pairs = poss[poss > (negs.min() if len(negs) > 1 else poss.mean())] positive_loss = positive_pairs.pow(2).sum() negative_loss = F.relu(0.5 - negative_pairs).pow(2).sum() loss = positive_loss + negative_loss return loss class TwoHeadConcat(AttentionReduction): """Use two parallel SingleHeadReduction, and concatenate
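# --------------------------------------------------------------------------
# Illustrative aside (not part of the original module): the contrastive losses
# above all share the same in-batch-negatives pattern: L2-normalize the two
# encoders' outputs, build the B x B similarity matrix, and treat the diagonal
# as the positive class for a per-row cross-entropy. The sketch below shows
# that core computation with plain tensors standing in for the outputs of
# model.encode_query / model.encode_response, so it runs without the rest of
# the framework.
import torch
import torch.nn.functional as F

def in_batch_contrastive(query_vecs, response_vecs, temperature=1.0):
    """query_vecs, response_vecs: [B, H]; row i of each forms a positive pair."""
    q = F.normalize(query_vecs, p=2, dim=1)
    r = F.normalize(response_vecs, p=2, dim=1)
    logits = q @ r.T * temperature          # [B, B] scaled cosine similarities
    labels = torch.arange(q.shape[0], device=q.device)
    return F.cross_entropy(logits, labels)  # diagonal entries are the targets

# q = torch.randn(8, 32); r = q + 0.1 * torch.randn(8, 32)
# loss = in_batch_contrastive(q, r, temperature=20.0)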
import idc import idaapi from idc import * from idaapi import * import idautils def SIGNEXT(x, b): m = 1 << (b - 1) x = x & ((1 << b) - 1) return (x ^ m) - m BPF_INST_SIZE = 8 BPF_MAX_OPERAND_COUNT = 3 class BPF_CLASS: BPF_LD = 0x00 BPF_LDX = 0x01 BPF_ST = 0x02 BPF_STX = 0x03 BPF_ALU = 0x04 BPF_JMP = 0x05 BPF_RET = 0x06 BPF_MISC = 0x07 class BPF_SIZE: BPF_W = 0x00 BPF_H = 0x08 BPF_B = 0x10 class BPF_MODE: BPF_IMM = 0x00 BPF_ABS = 0x20 BPF_IND = 0x40 BPF_MEM = 0x60 BPF_LEN = 0x80 BPF_MSH = 0xa0 class BPF_OP: BPF_ADD = 0x00 BPF_SUB = 0x10 BPF_MUL = 0x20 BPF_DIV = 0x30 BPF_OR = 0x40 BPF_AND = 0x50 BPF_LSH = 0x60 BPF_RSH = 0x70 BPF_NEG = 0x80 BPF_MOD = 0x90 BPF_XOR = 0xa0 BPF_JA = 0x00 BPF_JEQ = 0x10 BPF_JGT = 0x20 BPF_JGE = 0x30 BPF_JSET = 0x40 class BPF_SRC: BPF_K = 0x00 BPF_X = 0x08 class BPF_RVAL: BPF_K = 0x00 BPF_X = 0x08 BPF_A = 0x10 class BPF_MISCOP: BPF_TAX = 0x00 BPF_TXA = 0x80 class BPFi(object): class Code(object): def __init__(self, code): self.kls = (code) & 0x07 self.size = (code) & 0x18 self.mode = (code) & 0xe0 self.op = (code) & 0xf0 self.src = (code) & 0x08 self.rval = (code) & 0x18 self.miscop = (code) & 0xf8 def __init__(self, c, t, f, k): self.c = c self.i = BPFi.Code(c) self.jt = t self.jf = f self.k = k class BpfProc(processor_t): NEGATIVE_BRANCH = 0x00 POSITIVE_BRANCH = 0x01 M_BASE = 0x04 FORCE_ENUM = 0x0b id = 0x8000 + 8888 flag = PR_ADJSEGS | PRN_HEX cnbits = 8 dnbits = 8 psnames = ["bpf"] plnames = ["BPF"] segreg_size = 0 instruc_start = 0 assembler = { 'header': [".bpf"], "flag": AS_NCHRE | ASH_HEXF0 | ASD_DECF0 | ASO_OCTF0 | ASB_BINF0 | AS_NOTAB, "uflag": 0, "name": "b-p-f", "origin": ".org", "end": ".end", "cmnt": ";", "ascsep": '"', "accsep": "'", "esccodes": "\"'", "a_ascii": ".ascii", "a_byte": ".byte", "a_word": ".word", "a_dword": ".dword", "a_bss": "dfs %s", "a_seg": "seg", "a_curip": "PC", "a_public": "", "a_weak": "", "a_extrn": ".extern", "a_comdef": "", "a_align": ".align", "lbrace": "(", "rbrace": ")", "a_mod": "%", "a_band": "&", "a_bor": "|", "a_xor": "^", "a_bnot": "~", "a_shl": "<<", "a_shr": ">>", "a_sizeof_fmt": "size %s", } def emu_operand(self, op, insn, feature): if op.type == o_mem: # dodata2(0, op.addr, op.dtyp) # insn.create_op_data(0, op.addr, op.dtyp) add_dref(0, op.addr, dr_R) elif op.type == o_near: # name label accordincly; can only be labels from branch instructions assert op.addr % 8 == 0, 'unaligned address at offset '+op.addr n = '@_{}'.format(op.addr/8) MakeNameEx(op.addr, n, SN_AUTO) add_cref(insn.ea, op.addr, fl_JN) def notify_emu(self, insn): feature = insn.get_canon_feature() for i in range(BPF_MAX_OPERAND_COUNT): # max operand count oprnd = insn[i] if oprnd.type == o_void: break # no more operands self.emu_operand(oprnd, insn, feature) if not feature & CF_STOP: add_cref(insn.ea, insn.ea + insn.size, fl_F) return True def notify_out_operand(self, ctx, op): out_symbol = ctx.out_symbol OutValue = ctx.out_value out_name_expr = ctx.out_name_expr out_register = ctx.out_register QueueSet = remember_problem outLong = ctx.out_long out_tagoff = ctx.out_tagoff out_tagon = ctx.out_tagon OutLong = ctx.out_long if op.type == o_phrase: opv = op_t() opv.type = o_imm opv.value = 4 OutValue(opv, OOFW_32) # no prefix out_symbol('*') out_symbol('(') out_symbol('[') OutValue(op, OOFW_32) # no prefix out_symbol(']') out_symbol('&') opv.value = 0x0f OutValue(opv, OOFW_32) # no prefix out_symbol(')') return True if op.type == o_displ: out_symbol('[') out_register(self.reg_names[op.reg]) out_symbol('+') OutValue(op, OOFW_32) # no prefix 
out_symbol(']') return True if op.type == o_reg: out_register(self.reg_names[op.reg]) return True if op.type == o_imm: # out_symbol('#') OutValue(op, OOFW_32) # no prefix # out_symbol(']') return True if op.type in [o_mem]: if op.specval & BpfProc.M_BASE: # is scrath memory out_register('M') out_symbol('[') OutValue(op, OOF_ADDR) # no prefix out_symbol(']') return True if op.type in [o_near]: r = out_name_expr(op, op.addr, BADADDR) if not r: out_tagon(COLOR_ERROR) OutLong(op.addr, 16) out_tagoff(COLOR_ERROR) QueueSet(Q_noName, self.cmd.ea) return True return False def notify_out_insn(self, ctx): feature = ctx.insn.get_canon_feature() ctx.out_mnemonic() if feature & CF_USE1: ctx.out_one_operand(0) if feature & CF_USE2: ctx.out_char(',') ctx.out_char(' ') ctx.out_one_operand(1) if feature & CF_USE3: ctx.out_char(',') ctx.out_char(' ') ctx.out_one_operand(2) ctx.set_gen_cmt() ctx.flush_outbuf() return def notify_ana(self, insn): insn.size = BPF_INST_SIZE ea = insn.ea c = get_word(ea) jt = get_wide_byte(ea+2) jf = get_wide_byte(ea+3) k = get_wide_dword(ea+4) print c, jt, jf, k bi = BPFi(c, jt, jf, k) # initialize operands to voids for i in range(BPF_MAX_OPERAND_COUNT+1): insn[i].type = o_void # set the instruction index insn.itype = 0 # TODO ROMOVE op_count = 0 inscls = bi.i.kls if inscls == BPF_CLASS.BPF_MISC: op_count = self.decode_misc(bi, insn) if inscls == BPF_CLASS.BPF_RET: op_count = self.decode_ret(bi, insn) elif inscls in [BPF_CLASS.BPF_LD, BPF_CLASS.BPF_LDX]: op_count = self.decode_ld(bi, insn) elif inscls == BPF_CLASS.BPF_JMP: op_count = self.decode_jmp(bi, insn) elif inscls in [BPF_CLASS.BPF_ST, BPF_CLASS.BPF_STX]: op_count = self.decode_store(bi, insn) elif inscls == BPF_CLASS.BPF_ALU: op_count = self.decode_alu(bi, insn) assert op_count <= BPF_MAX_OPERAND_COUNT, 'operand count of {} exceeds max of {}'.format( op_count, BPF_MAX_OPERAND_COUNT) return insn.size def __init__(self): processor_t.__init__(self) self.reg_names = [ "A", "x", "len", # virutal "CS", "M" ] self.reg_first_sreg = self.reg_names.index("CS") self.reg_code_sreg = self.reg_names.index("CS") self.reg_last_sreg = self.reg_names.index("M") self.reg_data_sreg = self.reg_names.index("M") self.instruc = [ # ALU {'name': 'add', 'feature': CF_USE1 | CF_USE2}, {'name': 'sub', 'feature': CF_USE1 | CF_USE2}, {'name': 'mul', 'feature': CF_USE1 | CF_USE2}, {'name': 'div', 'feature': CF_USE1 | CF_USE2}, {'name': 'or', 'feature': CF_USE1 | CF_USE2}, {'name': 'and', 'feature': CF_USE1 | CF_USE2}, {'name': 'lsh', 'feature': CF_USE1 | CF_USE2}, {'name': 'rsh', 'feature': CF_USE1 | CF_USE2}, {'name': 'neg', 'feature': CF_USE1}, {'name': 'mod', 'feature': CF_USE1 | CF_USE2}, {'name': 'xor', 'feature': CF_USE1 | CF_USE2}, # MISC {'name': 'tax', 'feature': 0}, {'name': 'txa', 'feature': 0}, # STORE {'name': 'stx', 'feature': CF_USE1 | CF_CHG1}, {'name': 'st', 'feature': CF_USE1 | CF_CHG1}, # LOAD {'name': 'ldx', 'feature': CF_USE1}, {'name': 'ld', 'feature': CF_USE1}, {'name': 'ldh', 'feature': CF_USE1}, {'name': 'ldb', 'feature': CF_USE1}, # BRANCH {'name': 'jne', 'feature': CF_STOP | CF_USE1 | CF_USE2 | CF_USE3}, {'name': 'jeq', 'feature': CF_STOP | CF_USE1 | CF_USE2 | CF_USE3}, {'name': 'jle', 'feature': CF_STOP | CF_USE1 | CF_USE2 | CF_USE3}, {'name': 'jgt', 'feature': CF_STOP | CF_USE1 | CF_USE2 | CF_USE3}, {'name': 'jlt', 'feature': CF_STOP | CF_USE1 | CF_USE2 | CF_USE3}, {'name': 'jge', 'feature': CF_STOP | CF_USE1 | CF_USE2 | CF_USE3}, {'name': 'jnset', 'feature': CF_STOP | CF_USE1 | CF_USE2 | CF_USE3}, {'name': 'jset', 
'feature': CF_STOP | CF_USE1 | CF_USE2 | CF_USE3}, {'name': 'jmp', 'feature': CF_STOP | CF_USE1}, # RETURN {'name': 'ret', 'feature': CF_STOP | CF_USE1}, ] self.instruc_end = len(self.instruc) self.iname2index = {} for idx, ins in enumerate(self.instruc): self.iname2index[ins['name']] = idx def decode_ld(self, bi, cmd): c = bi.i isldx = c.kls == BPF_CLASS.BPF_LDX if isldx: cmd.itype = self.iname2index['ldx'] else: cmd.itype = self.iname2index['ld'+{ BPF_SIZE.BPF_W:'', BPF_SIZE.BPF_H:'h', BPF_SIZE.BPF_B:'b' }[c.size]] if not isldx: if c.mode == BPF_MODE.BPF_ABS: cmd[0].type = o_mem cmd[0].dtyp = dt_dword cmd[0].addr = bi.k return 1 if c.mode == BPF_MODE.BPF_IND: cmd[0].type = o_displ cmd[0].dtyp = dt_dword cmd[0].value = SIGNEXT(bi.k,32) cmd[0].reg = self.reg_names.index('x') return 1 else: if c.mode == BPF_MODE.BPF_MSH: cmd[0].type = o_phrase cmd[0].dtyp = dt_dword cmd[0].value = SIGNEXT(bi.k,32) return 1 if isldx or (not isldx and c.size == BPF_SIZE.BPF_W): if c.mode == BPF_MODE.BPF_IMM: cmd[0].type = o_imm cmd[0].dtyp = dt_dword cmd[0].value = bi.k return 1 if c.mode == BPF_MODE.BPF_LEN: cmd[0].type = o_reg cmd[0].dtyp = dt_dword cmd[0].reg = self.reg_names.index('len') return 1 if c.mode == BPF_MODE.BPF_MEM: cmd[0].type = o_mem cmd[0].dtyp = dt_dword cmd[0].addr = bi.k cmd[0].specval |= BpfProc.M_BASE # M as base return 1 def decode_ret(self, bi, cmd): cmd.itype = self.iname2index['ret'] if bi.i.rval == BPF_RVAL.BPF_K: cmd[0].type = o_imm cmd[0].dtyp = dt_dword cmd[0].value = bi.k cmd[0].specval |= BpfProc.FORCE_ENUM # todo: defined values for seccomp? elif bi.i.rval == BPF_RVAL.BPF_A: cmd[0].type = o_reg cmd[0].dtyp = dt_dword cmd[0].reg = self.reg_names.index('A') else: pass # X not supported return 1 def decode_jmp(self, bi, cmd): c = bi.i curr_off = cmd.ea + cmd.size cmd.itype = self.iname2index[{ BPF_OP.BPF_JA:'jmp', BPF_OP.BPF_JEQ:'jeq', BPF_OP.BPF_JGE:'jge', BPF_OP.BPF_JGT:'jgt', BPF_OP.BPF_JSET:'jset' }[c.op]] if c.op == BPF_OP.BPF_JA: cmd[0].type = o_near cmd[0].dtyp = dt_dword cmd[0].addr = curr_off + bi.k* BPF_INST_SIZE return 1 immi = 0 jti = 1 jfi = 2 if bi.jt == 0: # if the true offset == 0, then use fake negative compares so arrows would be parsed correctly jfi = 1 jti
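# --------------------------------------------------------------------------
# Illustrative aside (not part of the processor module above): outside IDA the
# same 8-byte classic-BPF encoding (u16 code, u8 jt, u8 jf, u32 k, which is the
# layout notify_ana() reads with get_word/get_wide_byte/get_wide_dword) can be
# unpacked with the stdlib struct module. Little-endian byte order is assumed
# here; adjust the format string for a big-endian dump.
import struct

BPF_CLASS_NAMES = {0x00: "ld", 0x01: "ldx", 0x02: "st", 0x03: "stx",
                   0x04: "alu", 0x05: "jmp", 0x06: "ret", 0x07: "misc"}

def decode_bpf_word(raw8):
    """Split one 8-byte cBPF instruction into its code/jt/jf/k fields."""
    code, jt, jf, k = struct.unpack("<HBBI", raw8)
    return {
        "class": BPF_CLASS_NAMES[code & 0x07],
        "size": code & 0x18,
        "mode": code & 0xE0,
        "op": code & 0xF0,
        "src": code & 0x08,
        "jt": jt, "jf": jf, "k": k,
    }

# Example: "ldh [12]" (load half word at absolute offset 12, the ethertype)
# decode_bpf_word(struct.pack("<HBBI", 0x28, 0, 0, 12))
# -> {'class': 'ld', 'size': 8, 'mode': 32, ..., 'k': 12}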
<reponame>Rangarajbk072/Azure_DMS #/***This Artifact belongs to the Data Migration Jumpstart Engineering Team***/ #!/bin/python36 # $Id: expimpmysql.py 208 2019-11-29 00:21:53Z bpahlawa $ # Created 22-NOV-2019 # $Author: bpahlawa $ # $Date: 2019-11-29 11:21:53 +1100 (Fri, 29 Nov 2019) $ # $Revision: 208 $ import re from string import * import pymysql from sqlalchemy.dialects.mysql import LONGTEXT from io import StringIO,BytesIO from struct import pack import gzip import subprocess import configparser import os import getopt import sys import subprocess import threading import time import getpass import base64 import random import signal import io import glob import logging import datetime import readline import shutil from itertools import (takewhile,repeat) from multiprocessing import Pool, Manager #global datetime dtnow=None #foreign key script's filename crfkeyfilename='crforeignkey.sql' #other key script's filename crokeyfilename='crotherkey.sql' #create table script's filename crtblfilename='crtable.sql' #create trigger script's filename crtrigfilename='crtrigger.sql' #create sequence script's filename crseqfilename='crsequences.sql' #create view script's filename crviewfilename='crviews.sql' #create analyze db report cranalyzedbfilename='analyzedb' #spool out all schema_information crallinfo='allinfo' #create proc and func script's filename crprocfuncfilename='crprocfunc.sql' #create table file handle crtblfile=None #import cursor impcursor=None #import connection impconnection=None #export connection expconnection=None #mode either export or import mode=None #config file handle config=None #export chunk of rows exprowchunk=None #import chunk of rows improwchunk=None #import tables imptables=None #export tables exptables=None #config filename configfile=None #signal handler handler=None #total proc totalproc=0 #cursort tableinfo curtblinfo=None #export max rows per file expmaxrowsperfile=None expdatabase=None #report file afile=None sqlanalyzetableinfo=""" select table_schema, table_name, table_type, ROW_FORMAT, TABLE_ROWS, AVG_ROW_LENGTH, DATA_LENGTH, INDEX_LENGTH from information_schema.tables where table_schema = '{0}' order by 2,1,4,3; """ sqlanalyzeprocfuncinfo=""" select SPECIFIC_NAME, ROUTINE_CATALOG, ROUTINE_NAME, ROUTINE_TYPE, DATA_TYPE, CHARACTER_MAXIMUM_LENGTH, CHARACTER_OCTET_LENGTH, NUMERIC_PRECISION, NUMERIC_SCALE, DATETIME_PRECISION, CHARACTER_SET_NAME, COLLATION_NAME, DTD_IDENTIFIER, ROUTINE_BODY, EXTERNAL_NAME, EXTERNAL_LANGUAGE, PARAMETER_STYLE, IS_DETERMINISTIC, SQL_DATA_ACCESS, SQL_PATH, SECURITY_TYPE, CREATED, LAST_ALTERED, SQL_MODE, ROUTINE_COMMENT, DEFINER, CHARACTER_SET_CLIENT, COLLATION_CONNECTION, DATABASE_COLLATION from information_schema.routines where routine_schema='{0}' """ sqlanalyzeplugin=""" select plugin_name,plugin_version,plugin_type,plugin_maturity, load_option,plugin_license,plugin_author,plugin_description from information_schema.all_plugins where plugin_name not like 'INNODB%' and plugin_status='ACTIVE' """ sqlanalyzelanguage="select lanname from pg_catalog.pg_language order by 1" sqlanalyzeusedfeature=""" SELECT rolname,proname,lanname,proname,typname FROM pg_catalog.pg_namespace n JOIN pg_catalog.pg_authid a ON nspowner = a.oid JOIN pg_catalog.pg_proc p ON pronamespace = n.oid JOIN pg_catalog.pg_type t ON typnamespace = n.oid JOIN pg_catalog.pg_language l on prolang = l.oid where nspname in (select schema_name from information_schema.schemata where schema_name not in ('pg_catalog','information_schema','sys','dbo')) """ #SQL Statement for 
creating triggers sqlcreatetrigger="show triggers" sqllistprocfuncs=""" select routine_type,routine_name from information_schema.routines where routine_schema = '{0}' order by routine_name """ sqllistparams=""" select concat('CREATE ',routine_type,' ',specific_name,'(') cr ,concat(parameter_mode,' ',parameter_name,' ',dtd_identifier) param from information_schema.parameters where specific_schema='{0}' and specific_name='{1}' and routine_type='{2}' """ #SQL Statement for creating foreign keys sqlcreatefkey=""" SELECT 'ALTER TABLE '||nspname||'.'||relname||' ADD CONSTRAINT '||conname||' '|| pg_get_constraintdef(pg_constraint.oid)||';' FROM pg_constraint INNER JOIN pg_class ON conrelid=pg_class.oid INNER JOIN pg_namespace ON pg_namespace.oid=pg_class.relnamespace and pg_namespace.nspname not in ('sys') where pg_get_constraintdef(pg_constraint.oid) {0} '%FOREIGN KEY%' ORDER BY CASE WHEN contype='f' THEN 0 ELSE 1 END DESC,contype DESC,nspname DESC,relname DESC,conname DESC """ #SQL Statement for creating sequence sqlcreatesequence="show create sequence" #Statement for listing all tables sqllisttables="show full tables where table_type='BASE TABLE'" #Statement for creating table sqlcreatetable="show create table" #List name of tables and their sizes sqltableinfo="""select table_name,round(((data_length + index_length) / 1024 / 1024), 2) rowsz from information_schema.tables where table_schema='{0}' and table_type='BASE TABLE'""" #procedure to trap signal def trap_signal(signum, stack): logging.info("Ctrl-C has been pressed!") sys.exit(0) #procedure to count number of rows def rawincount(filename): f = gzip.open(filename, 'rt') bufgen = takewhile(lambda x: x, (f.read(8192*1024) for _ in repeat(None))) return sum( buf.count('\n') for buf in bufgen ) #procedure for crypting password def Crypt(string,key,encrypt=1): random.seed(key) alphabet = 2 * " AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz1234567890.;:,'?/|{}[]-=+_!@#$%^&*()<>`~" lenalpha = int(len(alphabet)/2) if encrypt: return ''.join([alphabet[alphabet.index(string[p]) + lenalpha - int(lenalpha * random.random())] for p in range(len(string))]) else: return ''.join([alphabet[alphabet.index(string[p]) - lenalpha + int(lenalpha * random.random())] for p in range(len(string))]) #procedure to encode password def encode_password(thepass): return(Crypt(thepass,'<PASSWORD>')) #procedure to decode password def decode_password(thepass): return(Crypt(thepass,'<PASSWORD>',encrypt=0)) #procedure to read configuration from mysqlconfig.ini file def read_config(section,key): global config,configfile try: value=config[section][key] return(value) except (Exception,configparser.Error) as error: logging.error("\033[1;31;40mread_config: Error in reading config "+configfile, str(error)) sys.exit(2) #procedure to generate forign keys creation script def generate_create_fkey(): global curtblinfo,expdatabase try: curtblinfo.execute(sqlcreatefkey.format('like')) rows=curtblinfo.fetchall() fkeyfile = open(expdatabase+"/"+crfkeyfilename,"w") for row in rows: fkeyfile.write(row[0]+"\n") except Exception as error: logging.error('\033[1;31;40mgenerate_create_fkey: Error occured: '+str(error)) sys.exit(2) finally: if (fkeyfile): fkeyfile.close() #procedure to generate sequences creation script def generate_create_sequence(): logging.info("Generating create sequence script...") global curtblinfo,expdatabase try: listofsequence=[] curtblinfo.execute("show full tables where table_type='SEQUENCE'") rows=curtblinfo.fetchall() for row in rows: 
listofsequence.append(row[0]) fseqfile = open(expdatabase+"/"+crseqfilename,"w") for sequence_name in listofsequence: curtblinfo.execute(sqlcreatesequence+" "+sequence_name) rows=curtblinfo.fetchall() for row in rows: fseqfile.write(row[1]+";\n") except Exception as error: logging.error('\033[1;31;40mgenerate_create_sequence: Error occured: '+str(error)) sys.exit(2) finally: if (fseqfile): fseqfile.close() #procedure to generate views creation script def generate_create_view(): logging.info("Generating create view script...") global curtblinfo,expdatabase try: listofview=[] curtblinfo.execute("show full tables where table_type='VIEW'") rows=curtblinfo.fetchall() for row in rows: listofview.append(row[0]) fviewfile = open(expdatabase+"/"+crviewfilename,"w") for view_name in listofview: curtblinfo.execute("show create view "+view_name) rows=curtblinfo.fetchall() for row in rows: fviewfile.write(row[1]+";\n") except Exception as error: logging.error('\033[1;31;40mgenerate_create_view: Error occured: '+str(error)) sys.exit(2) finally: if (fviewfile): fviewfile.close() #procedure to generate procedure creation script def generate_create_proc_and_func(): logging.info("Generating create procedure and function script...") global curtblinfo,expdatabase try: listofprocfunc=[] curtblinfo.execute(sqllistprocfuncs.format(expdatabase)) rows=curtblinfo.fetchall() for row in rows: listofprocfunc.append("SHOW CREATE "+row[0]+" "+row[1]) fprocfuncfile = open(expdatabase+"/"+crprocfuncfilename,"w") i=0 for procfuncname in listofprocfunc: curtblinfo.execute(procfuncname) rows=curtblinfo.fetchall() for row in rows: if (row[2]==None): logging.info("missing privilege \"grant select on mysql.proc to thisuser\", skipping create procedure and function...") fprocfuncfile.close() return if (i==0): fprocfuncfile.write("delimiter ;;\n") i+=1 fprocfuncfile.write(row[2]+"\n") fprocfuncfile.write(";;\n") except Exception as error: logging.error('\033[1;31;40mgenerate_create_proc_and_func: Error occured: '+str(error)) sys.exit(2) finally: if (fprocfuncfile): fprocfuncfile.close() #procedure to generate triggers creation script def generate_create_trigger(): logging.info("Generating create trigger script...") global curtblinfo,expdatabase try: listoftrigger=[] curtblinfo.execute(sqlcreatetrigger) rows=curtblinfo.fetchall() for row in rows: listoftrigger.append(row[0]) trigfile = open(expdatabase+"/"+crtrigfilename,"w") for trigger_name in listoftrigger: curtblinfo.execute("show create trigger "+trigger_name) rows=curtblinfo.fetchall() for row in rows: trigfile.write(row[2]+";\n") except Exception as error: logging.error('\033[1;33;40mgenerate_create_trigger: Error occured: '+str(error)) sys.exit(2) finally: if (trigfile): trigfile.close() #procedure to generate other keys creation script def generate_create_okey(): global curtblinfo,expdatabase try: curtblinfo.execute(sqlcreatefkey.format('not like')) rows=curtblinfo.fetchall() okeyfile = open(expdatabase+"/"+crokeyfilename,"w") for row in rows: okeyfile.write(row[0]+"\n") except (Exception,configparser.Error) as error: logging.error('\033[1;33;40mgenereate_create_okey: Error occured: '+str(error)) sys.exit(2) finally: if (okeyfile): okeyfile.close() #procedure to generate tables creation script def generate_create_table(tablename): global curtblinfo global crtblfile try: curtblinfo.execute(sqlcreatetable+" "+tablename) rows=curtblinfo.fetchall() for row in rows: crtblfile.write(row[1]+";\n") except (Exception,configparser.Error) as error: 
logging.error('\033[1;33;40mgenerate_create_table: Error occured: '+str(error)) pass #procedure to create table def create_table(): global impconnection,expdatabase curcrtable=impconnection.cursor() createtable="" crtblfailed=[] logging.info("Creating tables from the script") try: fcrtable = open(expdatabase+"/"+crtblfilename,"r") curcrtable.execute("SET FOREIGN_KEY_CHECKS=0;") for line in fcrtable.readlines(): if line.find(";") != -1: try: curcrtable.execute(createtable+line) impconnection.commit() except (Exception,configparser.Error) as error: if str(error).find("Foreign key constraint is incorrectly formed"): crtblfailed.append(createtable+line) elif not str(error[1]).find("already exists"): logging.error("\033[1;31;40m"+str(error)) else: logging.error('create_table: Error occured: '+str(error)) impconnection.rollback() pass createtable="" else: if createtable=="": logging.info("\033[1;33;40mExecuting...."+line) createtable+=line fcrtable.close() createtable="" curcrtable.execute("SET FOREIGN_KEY_CHECKS=1;") except Exception as error: logging.error('\033[1;31;40mcreate_table: Error occured: '+str(error)) #procedure to create table's keys def create_table_keys(): global impconnection,expdatabase curcrtablekeys=impconnection.cursor() createtablekeys="" logging.info("Creating table's KEYs from the script") try: fcrokey = open(expdatabase+"/"+crokeyfilename,"r") for line in fcrokey.readlines(): if line.find(");"): try: curcrtablekeys.execute(createtablekeys+line) impconnection.commit() except (Exception,configparser.Error) as error: if not str(error).find("already exists"): logging.error('create_table_keys: Error occured: '+str(error)) else: logging.error("\033[1;31;40m"+str(error)) impconnection.rollback() pass createtablekeys="" else: if createtablekeys=="": logging.info("\033[1;33;40mExecuting...."+line) createtablekeys+=line fcrokey.close() except Exception as error: loggin.error('create_table_keys: Error occured: '+str(error)) #procedure to create sequences def create_sequences(): global impconnection,expdatabase curcrsequences=impconnection.cursor() createsequences="" logging.info("Creating sequences from the script") try: crseqs = open(expdatabase+"/"+crseqfilename,"r") for line in crseqs.readlines(): if line.find(");"): try: curcrsequences.execute(createsequences+line) impconnection.commit() except (Exception,configparser.Error) as error: logging.info('create_sequences: Error occured: '+str(error)) impconnection.rollback() pass createsequences="" else: if createsequences=="": logging.info("\033[1;33;40mExecuting...."+line) createsequences+=line crseqs.close() except Exception as error: logging.error('\033[1;31;40mcreate_sequences: Error occured: '+str(error)) #procedure to re-create foreign keys from the generated script def recreate_fkeys(): global impconnection,expdatabase curfkeys=impconnection.cursor() createfkeys="" logging.info("Re-creating table's FOREIGN KEYs from the script") try: fcrfkey = open(expdatabase+"/"+crfkeyfilename,"r") for line in fcrfkey.readlines(): if line.find(");"): try: curfkeys.execute(createfkeys+line) impconnection.commit() logging.info(createfkeys+line+"....OK") except (Exception,pymysql.Error) as error: if not str(error).find("already exists"): logging.info('recreate_fkeys: Error occured: '+str(error)) else: logging.error("\033[1;31;40m"+str(error)) impconnection.rollback() pass createfkeys="" else: if createfkeys=="": logging.info("\033[1;33;40mExecuting...."+line) createfkeys+=line fcrfkey.close() curfkeys.close() except Exception as error: 
logging.error('\033[1;31;40mrecreate_fkeys: Error occured: '+str(error)) #preparing text def prepare_text(dat): cpy = StringIO() for row in dat: cpy.write('\t'.join([str(x).replace('\t','\\t').replace('\n','\\n').replace('\r','\\r').replace('None','\\N') for x in row]) + '\n') return(cpy) #insert data into table def insert_data(tablename): global impconnection global impcursor cpy = StringIO() thequery = "select * from " + tablename impcursor.execute(thequery) i=0 while True: i+=1 records = impcursor.fetchmany(improwchunk) if not records: break cpy = prepare_text(records) if (i==1): cpy.seek(0) impcursor.copy_from(cpy,tablename) logging.info("Inserted "+str(i*improwchunk)+" rows so far") impcursor.close() #insert data from file def insert_data_from_file(tablefile,impuser,imppass,impserver,impport,impcharset,impdatabase,improwchunk,dirname): try: filename=tablefile+".csv.gz" tablename=".".join(tablefile.split(".")[0:2]) insconnection=pymysql.connect(user=impuser, password=<PASSWORD>, host=impserver, port=int(impport), charset=impcharset, database=impdatabase,local_infile=True) curinsdata=insconnection.cursor() logging.info("Inserting data from \033[1;34;40m"+filename+"\033[1;37;40m to table \033[1;34;40m"+tablename) if os.path.isfile(dirname+"/"+filename): with gzip.open(dirname+"/"+filename,"rb") as f_in: with open(dirname+"/"+tablefile+".csv","wb") as f_out: shutil.copyfileobj(f_in,f_out) else: logging.info("File "+dirname+"/"+filename+" doesnt exist!, so skipping import to table "+tablename) insconnection.rollback() return() curinsdata.execute("SET FOREIGN_KEY_CHECKS=0;") curinsdata.execute("LOAD DATA LOCAL INFILE '"+dirname+"/"+tablefile+".csv' into table "+impdatabase+"."+tablename+" fields terminated by '\\t' ignore 1 LINES;") insconnection.commit() curinsdata.execute("SET FOREIGN_KEY_CHECKS=1;") logging.info("Data from \033[1;34;40m"+dirname+"/"+filename+"\033[1;37;40m has been inserted to table \033[1;34;40m"+tablename+"\033[1;36;40m") os.remove(dirname+"/"+tablefile+".csv") except (Exception,pymysql.Error) as error: print ("insert_data_from_file: Error :"+str(error)) finally: if(insconnection): insconnection.commit() curinsdata.close() insconnection.close() #verify data def verify_data(tablename,impuser,imppass,impserver,impport,impcharset,impdatabase,improwchunk,dirname): if len(tablename.split("."))>2: return() try: vrfyconnection=pymysql.connect(user=impuser, password=<PASSWORD>, host=impserver, port=int(impport), charset=impcharset, database=impdatabase) curvrfydata=vrfyconnection.cursor() curvrfydata.execute("select count(*) from "+".".join(tablename.split(".")[0:2])) rows=curvrfydata.fetchall() for row in rows: rowsfromtable=row[0] rowsfromfile=0 for thedumpfile in glob.glob(dirname+"/"+tablename+".*.csv.gz"): rowsfromfile+=rawincount(thedumpfile)-1 for thedumpfile in glob.glob(dirname+"/"+tablename+".csv.gz"): rowsfromfile+=rawincount(thedumpfile)-1 if (rowsfromfile==-1): rowsfromfile=0 if rowsfromfile==rowsfromtable: logging.info("Table \033[1;34;40m"+tablename+"\033[0;37;40m no of rows: \033[1;36;40m"+str(rowsfromfile)+" does match!\033[1;36;40m") else: logging.info("Table \033[1;34;40m"+tablename+"\033[1;31;40m DOES NOT match\033[1;37;40m") logging.info(" Total Rows from \033[1;34;40m"+tablename+" file(s) = \033[1;31;40m"+str(rowsfromfile)) logging.info(" Total Rows inserted to \033[1;34;40m"+tablename+" = \033[1;31;40m"+str(rowsfromtable)) except (Exception,pymysql.Error) as error: logging.error("\033[1;31;40mverify_data : Error :"+str(error)) finally: 
        if(vrfyconnection):
            curvrfydata.close()
            vrfyconnection.close()

#procedure how to use this script
def usage():
    print("\nUsage:
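# A minimal, hedged sketch of the gzip -> LOAD DATA LOCAL INFILE flow used by
# insert_data_from_file() above. The connection parameters, database name and CSV
# layout (tab-separated, one header row) are illustrative assumptions, not values
# taken from this script; the table name and file path are assumed to be trusted,
# since LOAD DATA cannot take them as bound query parameters.
import gzip
import shutil
import pymysql

def load_gzipped_csv_sketch(csv_gz_path, tablename, host="localhost", port=3306,
                            user="importer", password="changeme", database="targetdb"):
    csv_path = csv_gz_path[:-3]  # strip the trailing ".gz"
    # decompress the dump next to the original file
    with gzip.open(csv_gz_path, "rb") as f_in, open(csv_path, "wb") as f_out:
        shutil.copyfileobj(f_in, f_out)
    conn = pymysql.connect(user=user, password=password, host=host, port=port,
                           database=database, local_infile=True)
    try:
        cur = conn.cursor()
        cur.execute("SET FOREIGN_KEY_CHECKS=0;")
        cur.execute("LOAD DATA LOCAL INFILE '" + csv_path + "' INTO TABLE " + tablename +
                    " FIELDS TERMINATED BY '\\t' IGNORE 1 LINES;")
        cur.execute("SET FOREIGN_KEY_CHECKS=1;")
        conn.commit()
    finally:
        conn.close()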
p in placed] + [(p + 1) for p in placed] queue = sorted(set(queue)) seen = set(queue) while queue: lowest = queue.pop() if lowest == 0: continue needed_budget = (37 - len(placed)) * lowest for p in placed: needed_budget += max(0, lowest - p) if budget < needed_budget: continue remaining_budget = budget - needed_budget partial = len([p for p in placed if p <= lowest]) lowest_cnt = 37 - len(placed) + partial if lowest_cnt == 0: continue larger = [p for p in placed if p > lowest] if larger: next_larger = min(larger) can_replicate = min(next_larger - lowest - 1, remaining_budget / lowest_cnt) else: can_replicate = remaining_budget / lowest_cnt if can_replicate > 0: if lowest + can_replicate not in seen: seen.add(lowest + can_replicate) queue.append(lowest + can_replicate) if lowest + can_replicate - 1 not in seen: seen.add(lowest + can_replicate - 1) queue.append(lowest + can_replicate - 1) for exclude in xrange(0, min(remaining_budget, partial) + 1): cand = get_expected(placed, lowest, exclude ) - exclude - needed_budget ret = max(ret, cand) print 'Case #%d: %.10lf' % (cc + 1, ret) if lowest + can_replicate not in seen: seen.add(lowest + can_replicate) queue.append(lowest + can_replicate) if lowest + can_replicate - 1 not in seen: seen.add(lowest + can_replicate - 1) queue.append(lowest + can_replicate - 1) return seen def func_a282d4b798df4cd1beae3b6f4b64e9be(infile): cases = int(infile.readline()) for cc in xrange(cases): budget, bets = map(int, infile.readline().split()) placed = sorted(map(int, infile.readline().split())) ret = 0.0 queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed] queue = sorted(set(queue)) seen = set(queue) while queue: lowest = queue.pop() if lowest == 0: continue needed_budget = (37 - len(placed)) * lowest for p in placed: needed_budget += max(0, lowest - p) if budget < needed_budget: continue remaining_budget = budget - needed_budget partial = len([p for p in placed if p <= lowest]) lowest_cnt = 37 - len(placed) + partial if lowest_cnt == 0: continue larger = [p for p in placed if p > lowest] if larger: next_larger = min(larger) can_replicate = min(next_larger - lowest - 1, remaining_budget / lowest_cnt) else: can_replicate = remaining_budget / lowest_cnt if can_replicate > 0: if lowest + can_replicate not in seen: seen.add(lowest + can_replicate) queue.append(lowest + can_replicate) if lowest + can_replicate - 1 not in seen: seen.add(lowest + can_replicate - 1) queue.append(lowest + can_replicate - 1) for exclude in xrange(0, min(remaining_budget, partial) + 1): cand = get_expected(placed, lowest, exclude ) - exclude - needed_budget ret = max(ret, cand) print 'Case #%d: %.10lf' % (cc + 1, ret) if lowest + can_replicate not in seen: seen.add(lowest + can_replicate) queue.append(lowest + can_replicate) if lowest + can_replicate - 1 not in seen: seen.add(lowest + can_replicate - 1) queue.append(lowest + can_replicate - 1) return needed_budget def func_f6af7e28761841e5a52ffd5257546182(infile): cases = int(infile.readline()) for cc in xrange(cases): budget, bets = map(int, infile.readline().split()) placed = sorted(map(int, infile.readline().split())) ret = 0.0 queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed] queue = sorted(set(queue)) seen = set(queue) while queue: lowest = queue.pop() if lowest == 0: continue needed_budget = (37 - len(placed)) * lowest for p in placed: needed_budget += max(0, lowest - p) if budget < needed_budget: continue remaining_budget = budget - needed_budget partial = len([p for p in placed if 
p <= lowest]) lowest_cnt = 37 - len(placed) + partial if lowest_cnt == 0: continue larger = [p for p in placed if p > lowest] if larger: next_larger = min(larger) can_replicate = min(next_larger - lowest - 1, remaining_budget / lowest_cnt) else: can_replicate = remaining_budget / lowest_cnt if can_replicate > 0: if lowest + can_replicate not in seen: seen.add(lowest + can_replicate) queue.append(lowest + can_replicate) if lowest + can_replicate - 1 not in seen: seen.add(lowest + can_replicate - 1) queue.append(lowest + can_replicate - 1) for exclude in xrange(0, min(remaining_budget, partial) + 1): cand = get_expected(placed, lowest, exclude ) - exclude - needed_budget ret = max(ret, cand) print 'Case #%d: %.10lf' % (cc + 1, ret) if lowest + can_replicate not in seen: seen.add(lowest + can_replicate) queue.append(lowest + can_replicate) if lowest + can_replicate - 1 not in seen: seen.add(lowest + can_replicate - 1) queue.append(lowest + can_replicate - 1) return lowest def func_eb3ced0cb1a447f6b293a40e86b079d6(infile): cases = int(infile.readline()) for cc in xrange(cases): budget, bets = map(int, infile.readline().split()) placed = sorted(map(int, infile.readline().split())) ret = 0.0 queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed] queue = sorted(set(queue)) seen = set(queue) while queue: lowest = queue.pop() if lowest == 0: continue needed_budget = (37 - len(placed)) * lowest for p in placed: needed_budget += max(0, lowest - p) if budget < needed_budget: continue remaining_budget = budget - needed_budget partial = len([p for p in placed if p <= lowest]) lowest_cnt = 37 - len(placed) + partial if lowest_cnt == 0: continue larger = [p for p in placed if p > lowest] if larger: next_larger = min(larger) can_replicate = min(next_larger - lowest - 1, remaining_budget / lowest_cnt) else: can_replicate = remaining_budget / lowest_cnt if can_replicate > 0: if lowest + can_replicate not in seen: seen.add(lowest + can_replicate) queue.append(lowest + can_replicate) if lowest + can_replicate - 1 not in seen: seen.add(lowest + can_replicate - 1) queue.append(lowest + can_replicate - 1) for exclude in xrange(0, min(remaining_budget, partial) + 1): cand = get_expected(placed, lowest, exclude ) - exclude - needed_budget ret = max(ret, cand) print 'Case #%d: %.10lf' % (cc + 1, ret) if lowest + can_replicate not in seen: seen.add(lowest + can_replicate) queue.append(lowest + can_replicate) if lowest + can_replicate - 1 not in seen: seen.add(lowest + can_replicate - 1) queue.append(lowest + can_replicate - 1) return lowest_cnt def func_5e7d25143c234be8a84ef55d82ed6109(infile): cases = int(infile.readline()) for cc in xrange(cases): budget, bets = map(int, infile.readline().split()) placed = sorted(map(int, infile.readline().split())) ret = 0.0 queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed] queue = sorted(set(queue)) seen = set(queue) while queue: lowest = queue.pop() if lowest == 0: continue needed_budget = (37 - len(placed)) * lowest for p in placed: needed_budget += max(0, lowest - p) if budget < needed_budget: continue remaining_budget = budget - needed_budget partial = len([p for p in placed if p <= lowest]) lowest_cnt = 37 - len(placed) + partial if lowest_cnt == 0: continue larger = [p for p in placed if p > lowest] if larger: next_larger = min(larger) can_replicate = min(next_larger - lowest - 1, remaining_budget / lowest_cnt) else: can_replicate = remaining_budget / lowest_cnt if can_replicate > 0: if lowest + can_replicate not in seen: 
seen.add(lowest + can_replicate) queue.append(lowest + can_replicate) if lowest + can_replicate - 1 not in seen: seen.add(lowest + can_replicate - 1) queue.append(lowest + can_replicate - 1) for exclude in xrange(0, min(remaining_budget, partial) + 1): cand = get_expected(placed, lowest, exclude ) - exclude - needed_budget ret = max(ret, cand) print 'Case #%d: %.10lf' % (cc + 1, ret) if lowest + can_replicate not in seen: seen.add(lowest + can_replicate) queue.append(lowest + can_replicate) if lowest + can_replicate - 1 not in seen: seen.add(lowest + can_replicate - 1) queue.append(lowest + can_replicate - 1) return bets def func_17ba2d357d09415d8b7cf07dd3b9620c(infile): cases = int(infile.readline()) for cc in xrange(cases): budget, bets = map(int, infile.readline().split()) placed = sorted(map(int, infile.readline().split())) ret = 0.0 queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed] queue = sorted(set(queue)) seen = set(queue) while queue: lowest = queue.pop() if lowest == 0: continue needed_budget = (37 - len(placed)) * lowest for p in placed: needed_budget += max(0, lowest - p) if budget < needed_budget: continue remaining_budget = budget - needed_budget partial = len([p for p in placed if p <= lowest]) lowest_cnt = 37 - len(placed) + partial if lowest_cnt ==
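# The duplicated solver functions above all share the same core step: for a candidate
# floor value `lowest`, work out how much it costs to top every one of the 37 roulette
# numbers up to at least `lowest`. The helper below restates that calculation on its
# own (it mirrors the in-line computation of `needed_budget`); `get_expected` itself is
# not defined in this excerpt, so its payout formula is deliberately not reproduced here.
def cost_to_reach_floor(placed, lowest, wheel_size=37):
    # numbers with no existing bet must be raised from 0 to `lowest`
    cost = (wheel_size - len(placed)) * lowest
    # existing bets below the floor must be topped up to `lowest`
    for p in placed:
        cost += max(0, lowest - p)
    return cost

# e.g. with bets of 2 and 5 already placed and a floor of 3:
# cost_to_reach_floor([2, 5], 3) == 35 * 3 + 1 == 106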
from app import app
from app import db
from models.orcid import get_orcid_id_from_oauth
from models.person import Person
from models.person import PersonExistsException
from models.person import make_person
from models.person import refresh_orcid_info_and_save
from models.person import connect_orcid
from models.person import connect_twitter
from models.person import disconnect_twitter
from models.person import refresh_profile
from models.person import refresh_person
from models.person import delete_person
from models.person import update_person
from models.person import update_promos
from models.person import make_temporary_person_from_orcid
from models.person import top_acheivement_persons, avg_openess, get_sources
from models.log_temp_profile import add_new_log_temp_profile
from models.person import get_random_people
from models.product import Product
from models.product import get_all_products
from models.refset import num_people_in_db
from models.badge import Badge
from models.badge import badge_configs
from models.search import autocomplete
from models.url_slugs_to_redirect import url_slugs_to_redirect
from models.twitter import get_twitter_creds
from util import safe_commit, get_badge_description
from util import elapsed
from flask import make_response
from flask import request
from flask import redirect
from flask import abort
from flask import jsonify
from flask import render_template
from flask import send_file
from flask import g
from flask import url_for
import jwt
from jwt import DecodeError
from jwt import ExpiredSignature
from functools import wraps
import requests
import stripe
from requests_oauthlib import OAuth1
import os
import sys
import json
import logging
from operator import attrgetter
from urlparse import parse_qs, parse_qsl
from time import sleep
from time import time

logger = logging.getLogger("views")

def json_dumper(obj):
    """ if the obj has a to_dict() function we've implemented, uses it to get dict.
from http://stackoverflow.com/a/28174796 """ try: return obj.to_dict() except AttributeError: return obj.__dict__ def json_resp(thing): # hide_keys = request.args.get("hide", "").split(",") # if hide_keys: # for key_to_hide in hide_keys: # try: # del thing[key_to_hide] # except KeyError: # pass json_str = json.dumps(thing, sort_keys=True, default=json_dumper, indent=4) if request.path.endswith(".json") and (os.getenv("FLASK_DEBUG", False) == "True"): logger.info(u"rendering output through debug_api.html template") resp = make_response(render_template( 'debug_api.html', data=json_str)) resp.mimetype = "text/html" else: resp = make_response(json_str, 200) resp.mimetype = "application/json" return resp def abort_json(status_code, msg, **kwargs): body_dict = { "message": msg } body_dict.update(kwargs) resp_string = json.dumps(body_dict, sort_keys=True, indent=4) resp = make_response(resp_string, status_code) resp.mimetype = "application/json" abort(resp) @app.route("/<path:page>") # from http://stackoverflow.com/a/14023930/226013 @app.route("/") def index_view(path="index", page=""): if page.lower() in url_slugs_to_redirect: return redirect(u"http://v1.impactstory.org/{}".format(page.strip()), code=302) return render_template( 'index.html', is_local=os.getenv("IS_LOCAL", False), stripe_publishable_key=os.getenv("STRIPE_PUBLISHABLE_KEY") ) #support CORS @app.after_request def add_crossdomain_header(resp): resp.headers['Access-Control-Allow-Origin'] = "*" resp.headers['Access-Control-Allow-Methods'] = "POST, GET, OPTIONS, PUT, DELETE, PATCH, HEAD" resp.headers['Access-Control-Allow-Headers'] = "origin, content-type, accept, x-requested-with, authorization" # jason needs this to be able to see print() output in heroku local sys.stdout.flush() return resp @app.before_request def redirects(): new_url = None try: if request.headers["X-Forwarded-Proto"] == "https": pass elif "http://" in request.url: new_url = request.url.replace("http://", "https://") except KeyError: #logger.debug(u"There's no X-Forwarded-Proto header; assuming localhost, serving http.") pass if request.url.startswith("https://www.impactstory.org"): new_url = request.url.replace( "https://www.impactstory.org", "https://impactstory.org" ) logger.debug(u"URL starts with www; redirecting to " + new_url) if new_url: return redirect(new_url, 301) # permanent @app.route('/small-logo.png') def logo_small(): filename = "static/img/impactstory-logo.png" return send_file(filename, mimetype='image/png') @app.route("/favicon.ico") def favicon_ico(): return redirect(url_for("static", filename="img/favicon.ico")) ########################################################################### # API ########################################################################### @app.route("/api") def api_test(): return json_resp({"resp": "Impactstory: The Next Generation."}) @app.route("/api/test") def test0(): return jsonify({"test": True}) @app.route('/api/group/') def group(): resp = {} if not ('persons' in request.args and 'achievements' in request.args): abort(400) person_ids = request.args.getlist('persons') if not isinstance(person_ids, list): person_ids = [person_ids] achievement_names = request.args.getlist('achievements') if not isinstance(achievement_names, list): achievement_names = [achievement_names] persons = (Person.query.filter(Person.orcid_id.in_(person_ids)) .order_by(Person.openness.desc()) .all()) products = Product.query.filter(Product.orcid_id.in_(person_ids)).all() top_persons = top_acheivement_persons(person_ids, 
achievement_names, 3) resp['person_list'] = [person.to_dict() for person in persons] resp['top_person_list'] = [person.to_dict() for person in top_persons] resp['product_list'] = [product.to_dict() for product in products] resp['coauthor_list'] = [coauthor for person in persons if person.display_coauthors for coauthor in person.display_coauthors] badge_names = list(set([badge.name for person in persons for badge in person.badges_for_api])) grouped_badges = {} for badge_name in badge_names: from models.badge import badge_configs try: badge_config = badge_configs()[badge_name] except KeyError: continue grouped_badges[badge_name] = {'group': badge_config["group"], 'name': badge_config['name'], 'support_items': None, 'display_name': badge_config['display_name'], 'description': None, 'show_in_ui': True, # filtering this for above 'support_intro': None, 'context': None} sum_score = 0.0 num_products = 0.0 for person in persons: if person.get_badge(badge_name): badge_score = person.get_badge(badge_name) sum_score += person.num_products * badge_score.value num_products += person.num_products if num_products: grouped_badges[badge_name]['percentile'] = round(sum_score / num_products, 2) else: grouped_badges[badge_name]['percentile'] = None try: grouped_badges[badge_name]['description'] = get_badge_description(badge_name, grouped_badges[badge_name]['percentile']) except TypeError: grouped_badges[badge_name]['description'] = None resp['openness'] = grouped_badges['percent_fulltext']['percentile'] if 'percent_fulltext' in grouped_badges else None resp['grouped_badges'] = grouped_badges.values() resp['source_list'] = [source.to_dict() for source in get_sources(products)] return jsonify(resp) @app.route("/api/person/<orcid_id>/polling") @app.route("/api/person/<orcid_id>/polling.json") def profile_endpoint_polling(orcid_id): my_person = Person.query.filter_by(orcid_id=orcid_id).first() return json_resp(my_person.to_dict()) @app.route("/api/person/<orcid_id>") @app.route("/api/person/<orcid_id>.json") def profile_endpoint(orcid_id): my_person = Person.query.filter_by(orcid_id=orcid_id).first() # the right was to do this is save an is_deleted flag in the db and check it here. # this will work for now. deleted_orcid_ids = [ "0000-0003-4875-1447", "0000-0002-2942-6609", "0000-0002-4812-4745" ] if orcid_id in deleted_orcid_ids: abort_json(404, "This user is deleted") if not my_person: if not request.args.get("source"): if request.headers.getlist("X-Forwarded-For"): ip = request.headers.getlist("X-Forwarded-For")[0] if ip == "172.16.17.32": abort_json(429, """We've noticed you are making many requests. Please add ?source=YOUREMAILADDRESS to your API calls, or email us at <EMAIL> for more details on our API. 
Thanks!""") print u"making temporary person for {orcid_id}, referred by {referrer} using url {url}, ip {ip}".format( orcid_id=orcid_id, referrer=request.referrer, url=request.url, ip=request.remote_addr) my_person = make_temporary_person_from_orcid(orcid_id) print u"saving log_temp_profile for {}".format(my_person) temp_profile_log = add_new_log_temp_profile(my_person, request) return json_resp(my_person.to_dict()) @app.route("/api/person/twitter_screen_name/<screen_name>") @app.route("/api/person/twitter_screen_name/<screen_name>.json") def profile_endpoint_twitter(screen_name): res = db.session.query(Person.orcid_id).filter_by(twitter=screen_name).first() if not res: abort_json(404, "We don't have anyone with that twitter screen name") return json_resp({"id": res[0]}) # need to call it with https for it to work @app.route("/api/person/<orcid_id>", methods=["POST"]) @app.route("/api/person/<orcid_id>.json", methods=["POST"]) def modify_profile_endpoint(orcid_id): my_person = Person.query.filter_by(orcid_id=orcid_id).first() product_id = request.json["product"]["id"] my_product = next(my_product for my_product in my_person.products if my_product.id==product_id) url = request.json["product"]["fulltext_url"] my_product.set_oa_from_user_supplied_fulltext_url(url) my_person.recalculate_openness() safe_commit(db) return json_resp(my_person.to_dict()) @app.route("/api/person/<orcid_id>/refresh", methods=["POST"]) @app.route("/api/person/<orcid_id>/refresh.json", methods=["POST"]) def refresh_profile_endpoint(orcid_id): my_person = refresh_profile(orcid_id) return json_resp(my_person.to_dict()) @app.route("/api/person/<orcid_id>/fulltext", methods=["POST"]) @app.route("/api/person/<orcid_id>/fulltext.json", methods=["POST"]) def refresh_fulltext(orcid_id): my_person = Person.query.filter_by(orcid_id=orcid_id).first() my_person.recalculate_openness() safe_commit(db) return json_resp(my_person.to_dict()) @app.route("/api/person/<orcid_id>/tweeted-quickly", methods=["POST"]) def tweeted_quickly(orcid_id): my_person = Person.query.filter_by(orcid_id=orcid_id).first() if not my_person: print u"returning 404: orcid profile {} does not exist".format(orcid_id) abort_json(404, "That ORCID profile doesn't exist") my_person.tweeted_quickly = True success = safe_commit(db) return json_resp({"resp": "success"}) @app.route("/api/search/<search_str>") def search(search_str): ret = autocomplete(search_str) return jsonify({"list": ret, "count": len(ret)}) @app.route("/api/products") def all_products_endpoint(): res = get_all_products() return jsonify({"list": res }) @app.route("/api/people") def people_endpoint(): count = num_people_in_db() return jsonify({"count": count}) @app.route("/api/badges") def badges_about(): return json_resp(badge_configs()) @app.route("/api/donation", methods=["POST"]) def donation_endpoint(): stripe.api_key = os.getenv("STRIPE_API_KEY") metadata = { "full_name": request.json["fullName"], "orcid_id": request.json["orcidId"], "email": request.json["email"] } try: stripe.Charge.create( amount=request.json["cents"], currency="usd", source=request.json["tokenId"], description="Impactstory donation", metadata=metadata ) except stripe.error.CardError, e: # The card has been declined abort_json(499, "Sorry, your credit card was declined.") return jsonify({"message": "well done!"}) # user management ############################################################################## def parse_token(req): token = req.headers.get('Authorization').split()[1] return jwt.decode(token, 
os.getenv("JWT_KEY")) def login_required(f): @wraps(f) def decorated_function(*args, **kwargs): if not request.headers.get('Authorization'): response = jsonify(message='Missing authorization header') print u"in login_required with error, Missing authorization header" response.status_code = 401 return response try: payload = parse_token(request) except DecodeError: response = jsonify(message='Token is invalid') response.status_code = 401 print u"in login_required with error, got DecodeError" return response except ExpiredSignature: response = jsonify(message='Token has expired') response.status_code = 401 print u"in login_required with error, got DecodeError" return response # print u"in login_required. payload: {}: ".format(payload) g.my_person = None if "id" in payload: # this uses the current token format g.my_person = Person.query.filter_by(id=payload["id"]).first() if not g.my_person and "orcid_id" in payload: # fallback because some tokens don't have id? g.my_person = Person.query.filter_by(orcid_id=payload["orcid_id"]).first() if not g.my_person and "sub" in payload: # fallback for old token format g.my_person = Person.query.filter_by(orcid_id=payload["sub"]).first() if not g.my_person: print u"in login_required with error, no known keys in token payload: {}".format(payload) # print u"in login_required success, got a person {}".format(g.my_person) return f(*args, **kwargs) return decorated_function @app.route('/api/me', methods=["GET", "DELETE", "POST"]) @login_required def me(): if request.method == "GET": return jsonify({ "token":g.my_person.get_token(), "promos": g.my_person.promos }) elif request.method == "POST": updated_person = update_person(g.my_person, request.json) return jsonify({"token": updated_person.get_token()}) elif request.method == "DELETE": delete_person(orcid_id=g.my_person.orcid_id) return jsonify({"msg": "Alas, <NAME>! 
I knew him, Horatio"}) @app.route("/api/me/refresh", methods=["POST"]) @login_required def refresh_me(): refresh_person(g.my_person) return jsonify({"token": g.my_person.get_token()}) @app.route("/api/me/promos", methods=["GET"]) @login_required def update_promos_endpoint(): update_promos(g.my_person, request.json) return jsonify({"token": g.my_person.get_token()}) @app.route("/api/me/orcid/login", methods=["POST"]) def orcid_login(): print u"in orcid_login with request.json {}".format(request.json) my_orcid_id = get_orcid_id_from_oauth( request.json['code'], request.json['redirectUri'] ) if not my_orcid_id: print u"in orcid_login with error, no my_orcid_id" abort_json(401, "Bad ORCID response; the auth code you sent is probably expired.") my_person = Person.query.filter_by(orcid_id=my_orcid_id).first() if not my_person: print u"in orcid_login with error, no my_person" abort_json( 404, "We don't have that ORCID in the db.", identity_provider_id=my_orcid_id ) return jsonify({"token": my_person.get_token()}) @app.route("/api/me/orcid/connect", methods=["POST"]) @login_required def orcid_connect(): print u"in orcid_connect with request.json {}".format(request.json) orcid_id = get_orcid_id_from_oauth( request.json['code'], request.json['redirectUri'] ) if not orcid_id: print u"in orcid_login with error, no orcid_id" abort_json(500, "Invalid JSON return from ORCID during OAuth.") connect_orcid(g.my_person, orcid_id) return jsonify({"token": g.my_person.get_token()}) @app.route("/api/me/orcid/refresh", methods=["POST"]) @login_required def refresh_my_orcid(): refresh_orcid_info_and_save(g.my_person) return jsonify({"token": g.my_person.get_token()}) @app.route("/api/me/twitter/login", methods=["POST"]) def twitter_login(): twitter_creds = get_twitter_creds(request.json.get('oauth_token'), request.json.get('oauth_verifier')) if not twitter_creds: print u"error in twitter_login, empty twitter creds" abort_json(422, "empty twitter creds") my_person = Person.query.filter_by(twitter=twitter_creds["screen_name"]).first() if not my_person: abort_json( 404, "We don't have that Twitter in the db.", identity_provider_id=twitter_creds["screen_name"] ) return jsonify({"token": my_person.get_token()}) @app.route("/api/me/twitter/register", methods=["POST"]) def twitter_register_but_login_if_they_are_already_registered(): twitter_creds = get_twitter_creds(request.json.get('oauth_token'), request.json.get('oauth_verifier')) landing_page = request.json.get("customLandingPage", "default") if not twitter_creds: print u"error in twitter_register_but_login_if_they_are_already_registered, empty twitter creds" abort_json(422, "empty
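# A hedged sketch of how a client would call one of the @login_required endpoints
# above. Per parse_token(), the Authorization header carries a scheme plus a JWT, and
# login_required() looks for an "id" (or legacy "orcid_id"/"sub") claim in the decoded
# payload. The base URL, signing key and person id below are placeholders for
# illustration only; a real token is issued by the server and signed with its JWT_KEY.
import jwt
import requests

JWT_KEY = "replace-with-the-server-side-JWT_KEY"  # placeholder
token = jwt.encode({"id": "some-person-id"}, JWT_KEY, algorithm="HS256")
if isinstance(token, bytes):  # older PyJWT versions return bytes
    token = token.decode("utf-8")

resp = requests.get(
    "https://impactstory.org/api/me",
    headers={"Authorization": "Bearer {}".format(token)},
)
print(resp.status_code)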
if no contour level background because it's probably supposed to be a clean looking plot and critical point is labeled on so many others and it also is less interesting to see without the contour level background anyway. #make the dialog box that displays the critical temperature and pressure. #note, not doing this for other horizontal axes because those plots are more cluttered and because people can always look at this one instead and #because T-P plot for example has the information very obvious in the plot itself. if VerticalAxis=='Temperature': CriticalPointTextBoxVerticalPosition=135 elif VerticalAxis=='Pressure': CriticalPointTextBoxVerticalPosition=135 else: #only other option right now is enthalpy CriticalPointTextBoxVerticalPosition=100 #lable the critical point for case where entropy is on the horizontal axis ThePlot.annotate('Critical Temperature: '+RoundAndPadToString(CriticalTemperature,2)+'K\nCritical Pressure: '+RoundAndPadToString(CriticalPressure/(10**6))+'MPa', xy = (CriticalPointHorizontal, CriticalPointVertical), xytext = (-20, CriticalPointTextBoxVerticalPosition), textcoords = 'offset points', size=8, ha = 'center', va = 'center', arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=.3'), bbox = dict(boxstyle = 'square,pad=0.5', fc = 'white', alpha = 1.0), #see http://matplotlib.org/api/patches_api.html#matplotlib.patches.Rectangle for bbox dict options ) else: #not consolidating the following above with the plotting of the saturation line because don't want to clutter the plot any more when entropy is the horizontal axis. #plot the distinction between gas and supercritical fluid (the critical pressure line, above the critical temperature). is this correct for the enthalpy case? yes, it is. ThePlot.plot(CriticalPressureHorizontalAxisValues,CriticalPressureVerticalAxisValues,color='black',linestyle='--',dashes=(2,2)) #plot the distinction between vapour and gas (the critical temperature below the critical pressure) ThePlot.plot(CriticalTemperatureHorizontalAxisValues1,CriticalTemperatureVerticalAxisValues1,color='gray',linestyle='--',dashes=(2,2)) #plot the distinction between liquid and supercritical fluid (the critical temperature above the critical pressure) ThePlot.plot(CriticalTemperatureHorizontalAxisValues2,CriticalTemperatureVerticalAxisValues2,color='black',linestyle='--',dashes=(2,2)) #plot the supercritical fluid boundaries and lable the regions for the case when pressure is on the horizontal axis ThePlot.annotate('Supercritical Fluid', xy=SupercriticalFluidLabelCoordinates, ha = 'center', va = 'center') ThePlot.annotate('Liquid', xy=LiquidLabelCoordinates, ha = 'center', va = 'center') ThePlot.annotate('Gas', xy=GasLabelCoordinates, ha = 'center', va = 'center') ThePlot.annotate('Vapor', xy=VaporLabelCoordinates, ha = 'center', va = 'center', size=8) if VerticalAxis=='Temperature': #Liquid+Vapor region is collapsed on this plot, so can't annotate it pass else: #all(?) 
other will have the Liquid+Vapor region visible ThePlot.annotate('Liquid+Vapor', xy=LiquidVaporLabelCoordinates, ha = 'center', va = 'center', size=8) if ImageFileType=='object': return TheFigure,ThePlot else: #save the image image_data = cStringIO.StringIO() #setup the file handle TheFigure.savefig(image_data,format=ImageFileType) #make the image file plt.close('all') #releases all the RAM that is never released automatically return image_data.getvalue() def PlotHeatExchanger(Recuperator,RecuperatorInputParameters,ImageFileType='png',DeltaTVerticalAxisMax=None,cpRatioAxisMax=None): if DeltaTVerticalAxisMax is None: DeltaTVerticalAxisMax=nearestMultiple(Recuperator['ActualDeltaTs'].max(),10,direction='up') if cpRatioAxisMax is None: cpRatioAxisMax=nearestMultiple(Recuperator['ActualSpecificHeatRatios'].max(),1,direction='up') #first setup the plot title as a variable since it is so long PlotTitle=( 'Cooled Side Inlet: Temperature='+RoundAndPadToString(Recuperator['LowPressure']['ActualStartingProperties']['Temperature'],1)+'K, Pressure='+RoundAndPadToString(Recuperator['LowPressure']['ActualStartingProperties']['Pressure']/10**6,1)+'MPa, Mass Fraction='+RoundAndPadToString(RecuperatorInputParameters['LowPressure']['MassFraction'],2)+'\n' +'Heated Side Inlet: Temperature='+RoundAndPadToString(Recuperator['HighPressure']['ActualStartingProperties']['Temperature'],1)+'K, Pressure='+RoundAndPadToString(Recuperator['HighPressure']['ActualStartingProperties']['Pressure']/10**6,1)+'MPa, Mass Fraction='+RoundAndPadToString(RecuperatorInputParameters['HighPressure']['MassFraction'],4)+'\n' +r'$\Delta T_{min}$='+RoundAndPadToString(RecuperatorInputParameters['MinimumDeltaT'],1)+' K' +', Pressure Drop='+RoundAndPadToString(RecuperatorInputParameters['DeltaPPerDeltaT'],0)+' Pa/K' +', Inlet Pressure Ratio='+RoundAndPadToString(Recuperator['HighPressure']['ActualStartingProperties']['Pressure']/Recuperator['LowPressure']['ActualStartingProperties']['Pressure'],1) +r', $\phi$='+RoundAndPadToString(Recuperator['phi'],2) +r', $\varepsilon$='+RoundAndPadToString(Recuperator['Effectiveness'],2) ) #setup the figure with two subfigures side by side TheFigure,(Plot1,Plot2)=plt.subplots(nrows=1, ncols=2,figsize=(14,8)) TheFigure.subplots_adjust(left=.075,right=.93) #adust the spacing TheFigure.suptitle(PlotTitle,fontsize=12) #only show about 20 markers on the plots because if there are too many data points then the lines aren't distinguishable #all lines have the same number of data points, so assign this variable for shorter plot calls markevery=len(Recuperator['LowPressure']['ActualTemperatures'])/20 #left figure #plot the specific heats Plot1.plot(Recuperator['LowPressure']['ActualTemperatures'],Recuperator['LowPressure']['ActualSpecificHeats'],marker='o',markersize=4,markevery=markevery,linestyle='-',color='black',label=r'$c_{p,Cooled}$') Plot1.plot(Recuperator['LowPressure']['ActualTemperatures'],Recuperator['HighPressure']['ActualSpecificHeats'],marker='o',markersize=4,markevery=markevery,linestyle='-',color='green',label=r'$c_{p,Heated}$') Plot1.plot(Recuperator['LowPressure']['ActualTemperatures'],Recuperator['LowPressure']['ActualSpecificHeats']*RecuperatorInputParameters['LowPressure']['MassFraction'],marker='x',markersize=8,markevery=markevery,linestyle='-',color='black',label=r'$C_{Cooled}$') 
Plot1.plot(Recuperator['LowPressure']['ActualTemperatures'],Recuperator['HighPressure']['ActualSpecificHeats']*RecuperatorInputParameters['HighPressure']['MassFraction'],marker='x',markersize=8,markevery=markevery,linestyle='-',color='green',label=r'$C_{Heated}$') #probably want to add a second vertical axis here and plot the pressure drop as a pressure ratio for both high and low pressure sides. #label the plot Plot1.set_xlabel('Temperature, Cooled Side, [K]') Plot1.set_xlim(floor(Recuperator['HighPressure']['ActualStartingProperties']['Temperature']/10)*10,ceil(Recuperator['LowPressure']['ActualStartingProperties']['Temperature']/10)*10) #note, kg$_{LowPressure}$ is not really right if low pressure mass fraction is not 1 Plot1.set_ylabel(r'$c_{p}$, [J/(kg*K)] and C, [J/(kg$_{Cooled}$*K)]') Plot1.set_ylim(0,ceil(max((Recuperator['LowPressure']['ActualSpecificHeats']).max(),(Recuperator['HighPressure']['ActualSpecificHeats']).max(),(Recuperator['LowPressure']['ActualSpecificHeats']*RecuperatorInputParameters['LowPressure']['MassFraction']).max(),(Recuperator['HighPressure']['ActualSpecificHeats']*RecuperatorInputParameters['HighPressure']['MassFraction']).max())/1000)*1000) Plot1.grid(True) Plot1.legend() #right figure #now plot the temperature difference and SpecificHeat ratio line1=Plot2.plot(Recuperator['LowPressure']['ActualTemperatures'],Recuperator['ActualDeltaTs'],marker='o',markersize=4,markevery=markevery,linestyle='-',color='black',label=r'$\Delta T$') Plot2.set_ylim(0,DeltaTVerticalAxisMax) #now, setup a second vertical axis Plot2_SecondVerticalAxis = Plot2.twinx() #plot the SpecificHeat ratios line3=Plot2_SecondVerticalAxis.plot(Recuperator['LowPressure']['ActualTemperatures'],Recuperator['ActualSpecificHeatRatios'],marker='o',markersize=4,markevery=markevery,linestyle='-',color='red',label=r'$C_{Heated}/C_{Cooled}$') #draw a horizontal line at 1 so it is easy to see where the specific heat ratio crosses 1 line4=Plot2_SecondVerticalAxis.axhline(1,color='green',label='1') #########warning, still confused which SpecificHeatRatios to actually plot. currently plotting the one based on extreme temperatures and not the actual SpecificHeat ratios in the node################ #actually, now no longer doing this. 
SpecificHeatRatiosOriginal is deactivated in the above function, and now using ActualSpecificHeatRatios because of the fact that now have heaters and coolers optional #also, the other thing is that turned off the iterating for SpecificHeatRatios (currently just doing 1 iteration), so think ActualSpecificHeatRatiosOriginal wouldn't really be any different #if it were actually defined in some way #draw the average SpecificHeat ratio so can see when it crosses 1 # line4=plt.axhline(Recuperator['ActualSpecificHeatRatios'].mean()) #set the titles and labels Plot2.set_xlabel('Temperature, Cooled Side, [K]') Plot2.set_ylabel(r'$\Delta T = T_{Cooled} - T_{Heated}$, [K]') Plot2_SecondVerticalAxis.set_ylabel(r'Heat Capacity Ratio, $C_{Heated}/C_{Cooled}$') Plot2_SecondVerticalAxis.set_ylim(0,cpRatioAxisMax) Plot2.set_xlim(floor(round(Recuperator['HighPressure']['ActualStartingProperties']['Temperature'],5)/10)*10,ceil(Recuperator['LowPressure']['ActualStartingProperties']['Temperature']/10)*10) #round first to 5 decimal places because there seems to be some roundoff error that accumulated sometimes and don't want to round by 10K for something that can't even be seen Plot2.grid(True) #add the legend #need to manually build the legend here because it spand multiple axes #concatenate the line objects into one list AllLines=line1+line3+[line4] #for some reason axhline objects aren't in a list, so put them in one so they can be concatenated #get all the lables in the form of a new list Lables=[l.get_label() for l in AllLines] #create the legend with the list of line objects and the corresponding list of line lables Plot2.legend(AllLines,Lables,loc='upper left') #save the image image_data = cStringIO.StringIO() #setup the file handle TheFigure.savefig(image_data,format=ImageFileType) #make the image file plt.close('all') #releases all the RAM that is never released automatically return image_data.getvalue() #create some mildly general helper functions to simplify syntax for making *some* of the plots. def CreateFigure(TitleTEXT,HorizontalAxisTEXT,VerticalAxisTEXT,HorizontalAxisMin=None,HorizontalAxisMax=None,VerticalAxisMin=None,VerticalAxisMax=None,MaxNLocatorY=10,AspectRatio=1,ResolutionMultiplier=1,FontScaling=250): TheFigure=plt.figure() #set the size and shape of the figure DotsPerInch=FontScaling*ResolutionMultiplier #FontScaling controls the font and line width scaling (larger value means larger fonts). ResolutionMultiplier controls the resolution without changing the font scaling (no real effect for pdf output). Larger value means higher resolution. 
ImageDimensions={} ImageDimensions['width']=1800*ResolutionMultiplier/DotsPerInch #don't know where 1800 came from, but seems like it is a good number to start with relative to FontScaling=250 ImageDimensions['height']=ImageDimensions['width']*AspectRatio #don't need to scale this by the resolution because width already was TheFigure.set_size_inches(ImageDimensions['width'],ImageDimensions['height']) TheFigure.set_dpi(DotsPerInch) ThePlot=TheFigure.add_subplot(111) #see PlotCycle for some notes on why host_subplot may want to be used TheFigure.suptitle(TitleTEXT,fontsize=12) #set format the axes labels # ThePlot.xaxis.set_major_formatter(plt.FuncFormatter(thousands)) # ThePlot.yaxis.set_major_formatter(plt.FuncFormatter(thousands)) ThePlot.yaxis.set_major_locator(MaxNLocator(MaxNLocatorY)) #and set the axis limits if don't want to keep the default auto ranging if (HorizontalAxisMin is not None) and (HorizontalAxisMax is not None): ThePlot.set_xlim(left=HorizontalAxisMin,right=HorizontalAxisMax) if (VerticalAxisMin is not None) and (VerticalAxisMax is not None): ThePlot.set_ylim(bottom=VerticalAxisMin,top=VerticalAxisMax) ThePlot.grid(True) #label the plot ThePlot.set_xlabel(HorizontalAxisTEXT) ThePlot.set_ylabel(VerticalAxisTEXT) TheFigure.subplots_adjust(left=.125,right=.92) TheLines=[] #initialize this value return TheFigure,ThePlot,TheLines def AddSecondVerticalAxis(TheFigure,ThePlot,VerticalAxisTEXT,VerticalAxisMin=None,VerticalAxisMax=None): ThePlot_SecondVerticalAxis=ThePlot.twinx() ThePlot_SecondVerticalAxis.set_ylabel(VerticalAxisTEXT) ThePlot_SecondVerticalAxis.yaxis.set_major_locator(MaxNLocator(10)) #and add gridlines # ThePlot_SecondVerticalAxis.grid(True) #and remove grid lines for the other axis # ThePlot.grid(False) if (VerticalAxisMin is not None) and (VerticalAxisMax is not None): ThePlot_SecondVerticalAxis.set_ylim(bottom=VerticalAxisMin,top=VerticalAxisMax) #also, re-tweak the subplot layout TheFigure.subplots_adjust(right=.90) return ThePlot_SecondVerticalAxis def AddMultiAxisLegend(ThePlot,TheLines,loc=0): #get all the lables in the form of a new list Lables=[l.get_label() for l in TheLines] #create the legend with the list of line objects and the corresponding list of line lables ThePlot.legend(TheLines,Lables,fontsize='x-small',loc=loc) def AddASubPlotBelow(TheFigure,TheOtherPlot,HorizontalAxisTEXT,VerticalAxisTEXT,TheOtherPlotSecondVerticalAxis=None,HorizontalAxisMin=None,HorizontalAxisMax=None,VerticalAxisMin=None,VerticalAxisMax=None,MaxNLocatorY=10): TheOtherPlot.change_geometry(2,1,1) if TheOtherPlotSecondVerticalAxis is not None: TheOtherPlotSecondVerticalAxis.change_geometry(2,1,1) if HorizontalAxisTEXT is None: sharex=TheOtherPlot plt.setp(TheOtherPlot.get_xticklabels(),visible=False) else: sharex=None TheSubPlot=TheFigure.add_subplot(2,1,2,sharex=sharex) #set format the axes labels # TheSubPlot.xaxis.set_major_formatter(plt.FuncFormatter(thousands)) # TheSubPlot.yaxis.set_major_formatter(plt.FuncFormatter(thousands)) TheSubPlot.yaxis.set_major_locator(MaxNLocator(MaxNLocatorY)) #and set the axis limits if don't want to keep the default auto ranging if (HorizontalAxisMin is not None) and (HorizontalAxisMax is not None) and (HorizontalAxisTEXT is not None): TheSubPlot.set_xlim(left=HorizontalAxisMin,right=HorizontalAxisMax) if (VerticalAxisMin is not None) and (VerticalAxisMax is not None): TheSubPlot.set_ylim(bottom=VerticalAxisMin,top=VerticalAxisMax) TheSubPlot.grid(True) #label the rest of the plot TheSubPlot.set_ylabel(VerticalAxisTEXT) if 
HorizontalAxisTEXT is None: TheSubPlot.set_xlabel(TheOtherPlot.get_xlabel()) TheOtherPlot.set_xlabel('') else: TheSubPlot.set_xlabel(HorizontalAxisTEXT) TheSubPlotLines=[] #initialize this value return TheSubPlot,TheSubPlotLines def SaveTheFigure(TheFigure,FileName=None,ImageFileType='pdf'): #save the image image_data = cStringIO.StringIO() #setup the file handle TheFigure.savefig(image_data,format=ImageFileType) #make the image file plt.close('all') #releases all the RAM that is never released automatically. note, this statement currently limits working on more than one figure simultaneously (which you can do). may want to improve it to just close the current figure (TheFigure). #actually write the image if FileName is None: return image_data.getvalue() else: WriteBinaryData(image_data.getvalue(),FileName) def PlotParameterSweep(BaseInputFilePath,CaseName,PlotMagnitudeDescription,TheFigure=None,ThePlot=None,TheLines=None,HorizontalAxisMin=None,HorizontalAxisMax=None,HorizontalAxisLabel=None,VerticalAxisMin=None,VerticalAxisMax=None,VerticalAxisLabel=None,ContourLevelMin=None,ContourLevelMax=None,ContourLevelRoundTo=1,PlotSecondAxisAsLines=False,SecondAxisLineLabelSuffix='',SecondAxisLineLabel=None): Folder=BaseInputFilePath+'/'+CaseName+'/' #as noted below, these SWEPT independent variables are at the end of the group (either NonCO2CycleIndependentVariable or the end of everything) IndependentVariableValuesGrid=load(Folder+'IndependentVariableValues.npz')['IndependentVariableValuesGrid'] #note, IndependentVariableLabels referes to more than just the swept values in IndependentVariableValuesGrid IndependentVariableLabels=load(Folder+'IndependentVariableValues.npz')['IndependentVariableLabels'] try: #might want to do this more specifically to see if the file exists rather than some other type of error. NonCO2CycleIndependentVariableLabels=cPickle.load(open(Folder+'NonCO2CycleIndependentVariableLabels.p', 'rb')) ValueCount=cPickle.load(open(Folder+'ValueCount.p', 'rb')) except: NonCO2CycleIndependentVariableLabels=() ValueCount=(0,0,0,0) Results=load(Folder+'OptimizationResults.npz')['Results'] if PlotMagnitudeDescription=='Cycle Efficiency [%]': plotdata=Results[0]*100. TitleTEXT='Maximum Thermal Efficiency='+RoundAndPadToString(plotdata.max(),2)+'%' elif PlotMagnitudeDescription=='Cycle Exergy Efficiency [%]': TEMPplotdata=Results[0] #more setup for this case will be done below else: #plot an optimized parameter ###################!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! #the following works since optimized values are always first in the IndependentVariableLabels and the script is currently not setup to work if NonCO2CycleIndependentVariableLabels is not () #and this script is not setup to work with multiple swept values. ###################!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
if PlotMagnitudeDescription == 'Overall Pressure Ratio': ParameterToPlotIndex1=FindSubString(IndependentVariableLabels,'PreCompressor Pressure Ratio')[0][0]+1 #need to offset by 1 because the efficiency is the first element in the Results object ParameterToPlotIndex2=FindSubString(IndependentVariableLabels,'Main Compressor Pressure Ratio')[0][0]+1 #need to offset by 1 because the efficiency is the first element in the Results object plotdata=Results[ParameterToPlotIndex1]*Results[ParameterToPlotIndex2] else: ParameterToPlotIndex=FindSubString(IndependentVariableLabels,PlotMagnitudeDescription)[0][0]+1 #need to offset by 1 because the efficiency is the first element in the Results object plotdata=Results[ParameterToPlotIndex] TitleTEXT='' IndependentVariableLabels=NonCO2CycleIndependentVariableLabels+tuple(IndependentVariableLabels) #set the horizontal axis HorizontalAxis=IndependentVariableValuesGrid[0] #figure out how to find the axis labels #the swept values are placed at the end of the group (either NonCO2CycleIndependentVariable or the end of everything) if ValueCount[3]==2: VerticalAxisIndex=len(NonCO2CycleIndependentVariableLabels)-1 HorizontalAxisIndex=VerticalAxisIndex-1 elif ValueCount[3]==1: VerticalAxisIndex=-1 HorizontalAxisIndex=len(NonCO2CycleIndependentVariableLabels)-1 else: VerticalAxisIndex=-1 HorizontalAxisIndex=VerticalAxisIndex-1 if HorizontalAxisLabel is None: HorizontalAxisLabel=IndependentVariableLabels[HorizontalAxisIndex] Log2YScale=False if (PlotMagnitudeDescription=='Cycle Efficiency [%]') and (HorizontalAxisLabel=='Optimizer Population Size'): #do some special things for this case plotdata=plotdata-plotdata.min() #most of the following could have been accomplishd outside of this function (before writing to a file), but just putting them in here since there already has to be a special modification of the plotdata variable Log2YScale=True TitleTEXT='' PlotMagnitudeDescription='Cycle Efficiency Percentage Point Increase Relative to Lowest Case' #do some more things for the exergy efficiency calculation if PlotMagnitudeDescription=='Cycle Exergy Efficiency [%]': if ( ((HorizontalAxisLabel=='Maximum Temperature [K]') and (IndependentVariableLabels[VerticalAxisIndex]=='Minimum
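# A short usage sketch for the plotting helpers defined above (CreateFigure,
# AddSecondVerticalAxis, AddMultiAxisLegend, SaveTheFigure). The data, labels and
# axis limits are made up for illustration; note that TheLines must collect the line
# objects from both axes so AddMultiAxisLegend can build one legend spanning them.
TheFigure, ThePlot, TheLines = CreateFigure(
    'Example Sweep', 'Temperature [K]', 'Efficiency [%]',
    HorizontalAxisMin=300, HorizontalAxisMax=900, VerticalAxisMin=0, VerticalAxisMax=60)
TheLines += ThePlot.plot([300, 600, 900], [20, 40, 55], color='black', label='Efficiency')
SecondAxis = AddSecondVerticalAxis(TheFigure, ThePlot, 'Pressure Ratio',
                                   VerticalAxisMin=0, VerticalAxisMax=10)
TheLines += SecondAxis.plot([300, 600, 900], [2, 5, 8], color='red', label='Pressure Ratio')
AddMultiAxisLegend(ThePlot, TheLines)
SaveTheFigure(TheFigure, FileName='ExampleSweep.pdf', ImageFileType='pdf')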
<filename>klayout_dot_config/tech/EBeam/pymacros/photonic_crystals/photonic_crystals.py """ KLayout-SiEPIC library for photonic crystals, UBC and SFU ******* PCells: ******* 1) swg_fc - sub-wavelength grating (SWG) fibre coupler (FC) NOTE: after changing the code, the macro needs to be rerun to install the new implementation. The macro is also set to "auto run" to install the PCell when KLayout is run. Version history: 2017/07/07 <NAME> (Simon Fraser University, BC, Canada) and <NAME> (Simon Fraser University, BC, Canada) - swg_fc PCell 2017/07/07 <NAME> - library definition and github repo 2017/07/09 <NAME> - Added Cavity Hole Pcell 2017/07/09 <NAME> - Added 2D H0 Photonic crystal cavity with single bus waveguide and pins 2017/07/10 <NAME> - Added waveguide with impedance matching tapers for transition from external waveguide to Photonic crystal W1 waveguide 2017/07/10 <NAME> - Improved generation efficiency by using single hole as a cell 2017/07/12 <NAME> - Added the H0c test structure that includes grating couplers, waveguides, and H0c 2017/07/12 <NAME> - Added L3 cavity with double bus waveguide and pins - Added a drop bus to H0 cavity(coupling between waveguide?) - Simplified code for generation 2017/07/12 <NAME> - SWGFC litho test structure 2017/07/13 <NAME> - grating coupler to grating coupler reference test structure - photonic crystal with only W1 waveguide - photonic crystal W1 reference test structure 2017/07/16 Jing<NAME> - Adaptive cavity generation under difference waveguide location - Able to choose the number of waveguides per PhC cavity - Added etch layer (12,0) on PhC slabs - Added H0 cavity with oxide buffer, reduced the vertices (32->20) for holes due to much smaller hole radius - Deleteted cavity hole class - Added hexagon half cell and hexagon with hole half cell for PhC generation, in case needed - Added H0 cavity generated with hexagon cells - Added PhC test pattern 2017/08/19 <NAME>u - Added suspension anchor areas for the cavities with undercut 2018/02/14 <NAME> - Upgrade to KLayout 0.25 and SiEPIC-Tools v0.3.x, updating layers to SiEPIC-EBeam v0.3.0+ 2021/04/01 lukas - moved phc and swg cells to beta library """ # Import KLayout Python API methods: # Box, Point, Polygon, Text, Trans, LayerInfo, etc from pya import * import pya import math from SiEPIC.utils import get_technology, get_technology_by_name from SiEPIC.utils import arc, arc_wg, arc_to_waveguide, points_per_circle#,layout # -------------------------------------------------------------------------------------------------------------------------------------------------- # # -------------------------------------------------------------------------------------------------------------------------------------------------- # class PhC_test(pya.PCellDeclarationHelper): """ Input: length, width """ import numpy def __init__(self): # Important: initialize the super class super(PhC_test, self).__init__() self.param("a", self.TypeDouble, "lattice constant (microns)", default = 0.744) self.param("n", self.TypeInt, "Number of holes in x and y direction", default = 5) self.param("r", self.TypeDouble, "hole radius (microns)", default = 0.179) self.param("n_sweep", self.TypeInt, "Different sizes of holes", default = 13) self.param("n_vertices", self.TypeInt, "Vertices of a hole", default = 32) TECHNOLOGY = get_technology_by_name('EBeam') self.param("layer", self.TypeLayer, "Layer", default = TECHNOLOGY['Waveguide']) self.param("pinrec", self.TypeLayer, "PinRec Layer", default = TECHNOLOGY['PinRec']) self.param("devrec", 
self.TypeLayer, "DevRec Layer", default = TECHNOLOGY['DevRec']) self.param("textl", self.TypeLayer, "Text Layer", default = TECHNOLOGY['Text']) self.param("etch", self.TypeLayer, "oxide etch layer", default = pya.LayerInfo(12, 0)) #def display_text_impl(self): # Provide a descriptive text for the cell # return "PhC resolution test_a%s-r%.3f-n%.3f" % \ #(self.a, self.r, self.n) def coerce_parameters_impl(self): pass def can_create_from_shape(self, layout, shape, layer): return False def produce_impl(self): # fetch the parameters dbu = self.layout.dbu ly = self.layout LayerSi = self.layer LayerSiN = ly.layer(self.layer) LayerPinRecN = ly.layer(self.pinrec) LayerDevRecN = ly.layer(self.devrec) LayerTextN = ly.layer(self.textl) LayerEtch = ly.layer(self.etch) TextLayerN = ly.layer(self.textl) # Fetch all the parameters: a = self.a/dbu r = self.r/dbu n_vertices = self.n_vertices n = int(math.ceil(self.n/2)) #print(n) n_sweep = self.n_sweep n_x = n n_y = n # Define Si slab and hole region for future subtraction Si_slab = pya.Region() hole = pya.Region() ruler = pya.Region() #hole_r = [r+50,r} ''' # translate to array (to pv) pv = [] for p in pcell_decl.get_parameters(): if p.name in param: pv.append(param[p.name]) else: pv.append(p.default) pcell_var = self.layout.add_pcell_variant(lib, pcell_decl.id(), pv) t_text = pya.Trans(x_offset-2*a_k, -y_offset-a_k*0.5) self.cell.insert(pya.CellInstArray(pcell_var, t_text)) for m in range(0,28): ruler.insert(pya.Box(-x_width+x_offset_2+x_spacing*m, -y_height+y_offset, x_width+x_offset_2+x_spacing*m, y_height+y_offset)) if m > 23: None else: ruler.insert(pya.Box(-y_height+x_offset_3, -x_width-y_offset_2+x_spacing*m, y_height+x_offset_3, x_width-y_offset_2+x_spacing*m)) for j in range(-n_y,n_y+1): if j%2 == 0: for i in range(-n_x,n_x+1): if i!=0: hole_x = abs(i)/i*(abs(i)-0.5)*a_k+x_offset hole_y = j*a_k*math.sqrt(3)/2 hole_trans = pya.Trans(Trans.R0, hole_x,hole_y) hole_t = hole_poly.transformed(hole_trans) hole.insert(hole_t) #print(hole_t) elif j%2 == 1: for i in range(-n_x,n_x+1): hole_x = i*a_k+x_offset hole_y = j*a_k*math.sqrt(3)/2 hole_trans = pya.Trans(Trans.R0, hole_x,hole_y) hole_t = hole_poly.transformed(hole_trans) hole.insert(hole_t) phc = Si_slab - hole phc = phc + ruler self.cell.shapes(LayerSiN).insert(phc) ''' class Hole_cell_half(pya.PCellDeclarationHelper): """ Input: length, width """ import numpy def __init__(self): # Important: initialize the super class super(Hole_cell_half, self).__init__() self.param("a", self.TypeDouble, "lattice constant (microns)", default = 0.744) self.param("r", self.TypeDouble, "hole radius (microns)", default = 0.179) TECHNOLOGY = get_technology_by_name('EBeam') self.param("layer", self.TypeLayer, "Layer", default = TECHNOLOGY['Waveguide']) self.param("pinrec", self.TypeLayer, "PinRec Layer", default = TECHNOLOGY['PinRec']) self.param("devrec", self.TypeLayer, "DevRec Layer", default = TECHNOLOGY['DevRec']) self.param("textl", self.TypeLayer, "Text Layer", default = TECHNOLOGY['Text']) def display_text_impl(self): # Provide a descriptive text for the cell return "Cavity Hole Cell_a%s-r%.3f" % \ (self.a, self.r) def coerce_parameters_impl(self): pass def can_create_from_shape(self, layout, shape, layer): return False def produce_impl(self): # fetch the parameters dbu = self.layout.dbu ly = self.layout LayerSi = self.layer LayerSiN = ly.layer(self.layer) LayerPinRecN = ly.layer(self.pinrec) LayerDevRecN = ly.layer(self.devrec) LayerTextN = ly.layer(self.textl) # Fetch all the parameters: a = self.a/dbu r = 
self.r/dbu # function to generate points to create a circle def hexagon_hole_half(a,r): npts = 10 theta_div = math.pi/3 theta_div_hole = math.pi/npts triangle_length = a/math.sqrt(3) pts = [] for i in range(0,4): pts.append(Point.from_dpoint(pya.DPoint(triangle_length*math.cos(i*theta_div-math.pi/2), triangle_length*math.sin(i*theta_div-math.pi/2)))) for i in range(0, npts+1): pts.append(Point.from_dpoint(pya.DPoint(r*math.cos(math.pi/2-i*theta_div_hole), r*math.sin(math.pi/2-i*theta_div_hole)))) return pts hole_cell = pya.Region() hole_cell_pts = hexagon_hole_half(a,r) hole_cell_poly_half = pya.Polygon(hole_cell_pts) #hole_cell.insert(hole_cell_poly_0) self.cell.shapes(LayerSiN).insert(hole_cell_poly_half) class Hexagon_cell_half(pya.PCellDeclarationHelper): """ Input: length, width """ import numpy def __init__(self): # Important: initialize the super class super(Hexagon_cell_half, self).__init__() self.param("a", self.TypeDouble, "lattice constant (microns)", default = 0.744) self.param("r", self.TypeDouble, "hole radius (microns)", default = 0.179) TECHNOLOGY = get_technology_by_name('EBeam') self.param("layer", self.TypeLayer, "Layer", default = TECHNOLOGY['Waveguide']) self.param("pinrec", self.TypeLayer, "PinRec Layer", default = TECHNOLOGY['PinRec']) self.param("devrec", self.TypeLayer, "DevRec Layer", default = TECHNOLOGY['DevRec']) self.param("textl", self.TypeLayer, "Text Layer", default = TECHNOLOGY['Text']) def display_text_impl(self): # Provide a descriptive text for the cell return "Cavity Hole Cell_a%s-r%.3f" % \ (self.a, self.r) def coerce_parameters_impl(self): pass def can_create_from_shape(self, layout, shape, layer): return False def produce_impl(self): # fetch the parameters dbu = self.layout.dbu ly = self.layout LayerSi = self.layer LayerSiN = ly.layer(self.layer) LayerPinRecN = ly.layer(self.pinrec) LayerDevRecN = ly.layer(self.devrec) LayerTextN = ly.layer(self.textl) # Fetch all the parameters: a = self.a/dbu r = self.r/dbu # function to generate points to create a circle def hexagon_half(a): theta_div = math.pi/3 triangle_length = a/math.sqrt(3) pts = [] for i in range(0,4): pts.append(Point.from_dpoint(pya.DPoint(triangle_length*math.cos(i*theta_div-math.pi/2), triangle_length*math.sin(i*theta_div-math.pi/2)))) return pts hexagon_pts = hexagon_half(a) hexagon_cell_poly_half = pya.Polygon(hexagon_pts) #hole_cell.insert(hole_cell_poly_0) self.cell.shapes(LayerSiN).insert(hexagon_cell_poly_half) class wg_triangle_tapers(pya.PCellDeclarationHelper): """ The PCell declaration for the strip waveguide taper. 
""" def __init__(self): # Important: initialize the super class super(wg_triangle_tapers, self).__init__() # declare the parameters self.param("tri_base", self.TypeDouble, "Triangle Base (microns)", default = 0.363) self.param("tri_height", self.TypeDouble, "Triangle Height (microns)", default = 0.426) self.param("taper_wg_length", self.TypeDouble, "Waveguide Length (microns)", default = 5) self.param("wg_width", self.TypeDouble, "Waveguide Width (microns)", default = 1) TECHNOLOGY = get_technology_by_name('EBeam') self.param("silayer", self.TypeLayer, "Layer", default = TECHNOLOGY['Waveguide']) self.param("pinrec", self.TypeLayer, "PinRec Layer", default = TECHNOLOGY['PinRec']) self.param("devrec", self.TypeLayer, "DevRec Layer", default = TECHNOLOGY['DevRec']) self.param("textl", self.TypeLayer, "Text Layer", default = TECHNOLOGY['Text']) def display_text_impl(self): # Provide a descriptive text for the cell return "waveguide_triangular_tapers_%.3f-%.3f" % (self.taper_wg_length, self.wg_width) def can_create_from_shape_impl(self): return False def produce(self, layout, layers, parameters, cell): """ coerce parameters (make consistent) """ self._layers = layers self.cell = cell self._param_values = parameters self.layout = layout shapes = self.cell.shapes # cell: layout cell to place the layout # LayerSiN: which layer to use # w: waveguide width # length units in dbu # fetch the parameters dbu = self.layout.dbu ly = self.layout LayerSi = self.silayer LayerSiN = ly.layer(self.silayer) LayerPinRecN = ly.layer(self.pinrec) LayerDevRecN = ly.layer(self.devrec) LayerTextN = ly.layer(self.textl) base = int(round(self.tri_base/dbu)) height = int(round(self.tri_height/dbu)) l = int(round(self.taper_wg_length/dbu)) w = int(round(self.wg_width/dbu)) pts = [Point(-l,w/2), Point(-base,w/2), Point(0,w/2+height), Point(0,-(w/2+height)), Point(-base,-w/2),Point(-l,-w/2) ] shapes(LayerSiN).insert(Polygon(pts)) # Pins on the bus waveguide side: pin_length = 200 if l < pin_length+1: pin_length = int(l/3) pin_length = math.ceil(pin_length / 2.) * 2 if pin_length == 0: pin_length
'/{containerName}/{blob}'} async def break_lease(self, timeout=None, break_period=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations. :param timeout: The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a> :type timeout: int :param break_period: For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter than the time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has expired, but the lease may be held for longer than the break period. If this header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease breaks immediately. :type break_period: int :param request_id: Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. :type request_id: str :param modified_access_conditions: Additional parameters for the operation :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions :param callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>` """ error_map = kwargs.pop('error_map', None) if_modified_since = None if modified_access_conditions is not None: if_modified_since = modified_access_conditions.if_modified_since if_unmodified_since = None if modified_access_conditions is not None: if_unmodified_since = modified_access_conditions.if_unmodified_since if_match = None if modified_access_conditions is not None: if_match = modified_access_conditions.if_match if_none_match = None if modified_access_conditions is not None: if_none_match = modified_access_conditions.if_none_match if_tags = None if modified_access_conditions is not None: if_tags = modified_access_conditions.if_tags comp = "lease" action = "break" # Construct URL url = self.break_lease.metadata['url'] path_format_arguments = { 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) query_parameters['comp'] = self._serialize.query("comp", comp, 'str') # Construct headers header_parameters = {} if break_period is not None: header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') if request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') if if_modified_since is not None: header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') if 
if_unmodified_since is not None: header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') if if_match is not None: header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') if if_none_match is not None: header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') if if_tags is not None: header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') # Construct and send request request = self._client.put(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise models.StorageErrorException(response, self._deserialize) if cls: response_headers = { 'ETag': self._deserialize('str', response.headers.get('ETag')), 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), 'x-ms-lease-time': self._deserialize('int', response.headers.get('x-ms-lease-time')), 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), } return cls(response, None, response_headers) break_lease.metadata = {'url': '/{containerName}/{blob}'} async def create_snapshot(self, timeout=None, metadata=None, request_id=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, lease_access_conditions=None, *, cls=None, **kwargs): """The Create Snapshot operation creates a read-only snapshot of a blob. :param timeout: The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a> :type timeout: int :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. :type metadata: str :param request_id: Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. 
:type request_id: str :param cpk_info: Additional parameters for the operation :type cpk_info: ~azure.storage.blob.models.CpkInfo :param cpk_scope_info: Additional parameters for the operation :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo :param modified_access_conditions: Additional parameters for the operation :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions :param lease_access_conditions: Additional parameters for the operation :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions :param callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>` """ error_map = kwargs.pop('error_map', None) encryption_key = None if cpk_info is not None: encryption_key = cpk_info.encryption_key encryption_key_sha256 = None if cpk_info is not None: encryption_key_sha256 = cpk_info.encryption_key_sha256 encryption_algorithm = None if cpk_info is not None: encryption_algorithm = cpk_info.encryption_algorithm encryption_scope = None if cpk_scope_info is not None: encryption_scope = cpk_scope_info.encryption_scope if_modified_since = None if modified_access_conditions is not None: if_modified_since = modified_access_conditions.if_modified_since if_unmodified_since = None if modified_access_conditions is not None: if_unmodified_since = modified_access_conditions.if_unmodified_since if_match = None if modified_access_conditions is not None: if_match = modified_access_conditions.if_match if_none_match = None if modified_access_conditions is not None: if_none_match = modified_access_conditions.if_none_match if_tags = None if modified_access_conditions is not None: if_tags = modified_access_conditions.if_tags lease_id = None if lease_access_conditions is not None: lease_id = lease_access_conditions.lease_id comp = "snapshot" # Construct URL url = self.create_snapshot.metadata['url'] path_format_arguments = { 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) query_parameters['comp'] = self._serialize.query("comp", comp, 'str') # Construct headers header_parameters = {} if metadata is not None: header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') if request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') if encryption_key is not None: header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') if encryption_key_sha256 is not None: header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') if encryption_algorithm is not None: header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') if encryption_scope is not None: header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') if if_modified_since is not None: header_parameters['If-Modified-Since'] = 
self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') if if_unmodified_since is not None: header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') if if_match is not None: header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') if if_none_match is not None: header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') if if_tags is not None: header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') if lease_id is not None: header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') # Construct and send request request = self._client.put(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise models.StorageErrorException(response, self._deserialize) if cls: response_headers = { 'x-ms-snapshot': self._deserialize('str', response.headers.get('x-ms-snapshot')), 'ETag': self._deserialize('str', response.headers.get('ETag')), 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), } return cls(response, None, response_headers) create_snapshot.metadata = {'url': '/{containerName}/{blob}'} async def start_copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, rehydrate_priority=None, request_id=None, blob_tags_string=None, seal_blob=None, source_modified_access_conditions=None, modified_access_conditions=None, lease_access_conditions=None, *, cls=None, **kwargs): """The Start Copy From URL operation copies a blob or an internet resource to a new blob. :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob must either be public or must be authenticated via a shared access signature. :type copy_source: str :param timeout: The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a> :type timeout: int :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. 
Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and
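# --- Illustrative usage (assumption: the public azure.storage.blob.aio clients wrap the
# generated break_lease / create_snapshot / start_copy_from_url operations above) -------
# The connection string, container and blob names are placeholders.
import asyncio
from azure.storage.blob.aio import BlobClient

async def snapshot_then_break_lease(conn_str: str) -> None:
    blob = BlobClient.from_connection_string(
        conn_str, container_name="my-container", blob_name="my-blob.txt"
    )
    async with blob:
        snapshot = await blob.create_snapshot()            # read-only snapshot of the blob
        print("snapshot id:", snapshot["snapshot"])
        lease = await blob.acquire_lease(lease_duration=60)
        await lease.break_lease(lease_break_period=0)      # end the lease immediately

# asyncio.run(snapshot_then_break_lease("<connection-string>"))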
        in place of the original ones.

    Returns:
        Nothing.
    """
    log = get_logger()

    if not os.path.isfile(failpath):
        raise RuntimeError("failure yaml file {} does not exist".format(failpath))

    fyml = yaml_read(failpath)

    step = fyml["step"]
    name = fyml["task"]
    workername = fyml["workername"]
    workeropts = fyml["worker_opts"]
    grph = fyml["graph"]
    origopts = fyml["opts"]
    nproc = fyml["procs"]

    comm = None
    rank = 0
    nworld = 1
    if use_mpi and (nproc > 1):
        from mpi4py import MPI
        comm = MPI.COMM_WORLD
        nworld = comm.size
        rank = comm.rank

    if nworld != nproc:
        if rank == 0:
            log.warning("WARNING: original task was run with {} processes, re-running with {} instead".format(nproc, nworld))

    opts = origopts
    if newopts is not None:
        log.warning("WARNING: overriding original options")
        opts = newopts

    worker = get_worker(step, workername, workeropts)

    rundir = io.get_pipe_rundir()
    logdir = os.path.join(rundir, io.get_pipe_logdir())
    (night, gname) = graph_night_split(name)
    nlogdir = os.path.join(logdir, night)

    # For this task, we will temporarily redirect stdout and stderr
    # to a task-specific log file.
    tasklog = os.path.join(nlogdir, "{}.log".format(gname))
    if rank == 0:
        if os.path.isfile(tasklog):
            os.remove(tasklog)
    if comm is not None:
        comm.barrier()

    failcount = 0
    with stdouterr_redirected(to=tasklog, comm=comm):
        try:
            log.debug("re-trying step {}, task {} with {} processes".format(step, name, nworld))
            worker.run(grph, name, opts, comm=comm)
        except:
            msg = "FAILED: step {} task {} process {}".format(step, name, rank)
            log.error(msg)
            exc_type, exc_value, exc_traceback = sys.exc_info()
            lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
            log.error(''.join(lines))
            if rank == 0:
                failcount = 1

    if comm is not None:
        failcount = comm.bcast(failcount, root=0)

    if rank == 0:
        if failcount > 0:
            log.error("{} of {} processes raised an exception".format(failcount, nworld))
        else:
            # success, clear failure file now
            if os.path.isfile(failpath):
                os.remove(failpath)

    return


def run_steps(first, last, spectrographs=None, nightstr=None, comm=None):
    """
    Run multiple sequential pipeline steps.

    This function first takes the communicator and the requested processes per task
    and splits the communicator to form groups of processes of the desired size.
    It then takes the full dependency graph and extracts all the tasks for a given
    step.  These tasks are then distributed among the groups of processes.

    Each process group loops over its assigned tasks.  For each task, it redirects
    stdout/stderr to a per-task file and calls run_task().  If any process in the
    group throws an exception, then the traceback and all information (graph and
    options) needed to re-run the task are written to disk.

    After all process groups have finished, the state of the full graph is merged
    from all processes.  This way a failure of one process on one task will be
    propagated as a failed task to all processes.

    Args:
        first (str): the first pipeline step to run.
        last (str): the last pipeline step to run.
        spectrographs (str): comma-separated list of spectrographs to use.
        nightstr (str): comma-separated list of regex patterns.
        comm (mpi4py.Comm): the full communicator to use for the whole step.

    Returns:
        Nothing.
""" log = get_logger() rank = 0 nproc = 1 if comm is not None: rank = comm.rank nproc = comm.size # get the full graph grph = None if rank == 0: grph = load_prod(nightstr=nightstr, spectrographs=spectrographs) graph_db_check(grph) if comm is not None: grph = comm.bcast(grph, root=0) # read run options from disk rundir = io.get_pipe_rundir() optfile = os.path.join(rundir, "options.yaml") opts = None if rank == 0: opts = yaml_read(optfile) if comm is not None: opts = comm.bcast(opts, root=0) # compute the ordered list of steps to run firststep = None if first is None: firststep = 0 else: s = 0 for st in step_types: if st == first: firststep = s s += 1 laststep = None if last is None: laststep = len(step_types) else: s = 1 for st in step_types: if st == last: laststep = s s += 1 if rank == 0: log.info("running steps {} to {}".format(step_types[firststep], step_types[laststep-1])) # Mark our steps as in progress for st in range(firststep, laststep): for name, nd in grph.items(): if nd["type"] == step_file_types[step_types[st]]: if nd["state"] != "done": nd["state"] = "running" if rank == 0: graph_db_write(grph) # Run the steps. Each step updates the graph in place to track # the state of all nodes. for st in range(firststep, laststep): runfile = None if rank == 0: log.info("starting step {} at {}".format(step_types[st], time.asctime())) grph, ntask, failtask = run_step(step_types[st], grph, opts, comm=comm) if rank == 0: log.info("completed step {} at {}".format(step_types[st], time.asctime())) log.info(" {} total tasks, {} failures".format(ntask, failtask)) graph_db_write(grph) if (ntask > 0) and (ntask == failtask): if rank == 0: log.info("step {}: all tasks failed, quiting at {}".format(step_types[st], time.asctime())) break if comm is not None: comm.barrier() if rank == 0: log.info("finished steps {} to {}".format(step_types[firststep], step_types[laststep-1])) return def shell_job(path, logroot, desisetup, commands, comrun="", mpiprocs=1, threads=1): with open(path, "w") as f: f.write("#!/bin/bash\n\n") f.write("now=`date +%Y%m%d-%H:%M:%S`\n") f.write("export STARTTIME=${now}\n") f.write("log={}_${{now}}.log\n\n".format(logroot)) f.write("source {}\n\n".format(desisetup)) f.write("export OMP_NUM_THREADS={}\n\n".format(threads)) run = "" if comrun != "": run = "{} {}".format(comrun, mpiprocs) for com in commands: executable = com.split(" ")[0] # f.write("which {}\n".format(executable)) f.write("echo logging to ${log}\n") f.write("time {} {} >>${{log}} 2>&1\n\n".format(run, com)) mode = stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH os.chmod(path, mode) return def nersc_job(host, path, logroot, desisetup, commands, nodes=1, \ nodeproc=1, minutes=10, multisrun=False, openmp=False, multiproc=False, \ queue="debug", jobname="desipipe"): hours = int(minutes/60) fullmin = int(minutes - 60*hours) timestr = "{:02d}:{:02d}:00".format(hours, fullmin) totalnodes = nodes if multisrun: # we are running every command as a separate srun # and backgrounding them. In this case, the nodes # given are per command, so we need to compute the # total. 
totalnodes = nodes * len(commands) with open(path, "w") as f: f.write("#!/bin/bash -l\n\n") if queue == "debug": f.write("#SBATCH --partition=debug\n") else: f.write("#SBATCH --partition=regular\n") if host == "cori": f.write("#SBATCH --constraint=haswell\n") elif host == "coriknl": f.write("#SBATCH --constraint=knl,quad,cache\n") f.write("#SBATCH --core-spec=4\n") f.write("#SBATCH --account=desi\n") f.write("#SBATCH --nodes={}\n".format(totalnodes)) f.write("#SBATCH --time={}\n".format(timestr)) f.write("#SBATCH --job-name={}\n".format(jobname)) f.write("#SBATCH --output={}_%j.log\n".format(logroot)) f.write("echo Starting slurm script at `date`\n\n") f.write("source {}\n\n".format(desisetup)) f.write("# Set TMPDIR to be on the ramdisk\n") f.write("export TMPDIR=/dev/shm\n\n") if host == "edison": f.write("cpu_per_core=2\n\n") f.write("node_cores=24\n\n") elif host == "cori": f.write("cpu_per_core=2\n\n") f.write("node_cores=32\n\n") elif host == "coriknl": f.write("cpu_per_core=4\n\n") f.write("node_cores=64\n\n") else: raise RuntimeError("Unsupported NERSC host") f.write("nodes={}\n".format(nodes)) f.write("node_proc={}\n".format(nodeproc)) f.write("node_thread=$(( node_cores / node_proc ))\n") f.write("node_depth=$(( cpu_per_core * node_thread ))\n") f.write("procs=$(( nodes * node_proc ))\n\n") if openmp: f.write("export OMP_NUM_THREADS=${node_thread}\n") f.write("export OMP_PLACES=threads\n") f.write("export OMP_PROC_BIND=spread\n") else: f.write("export OMP_NUM_THREADS=1\n") f.write("\n") runstr = "srun" if multiproc: runstr = "{} --cpu_bind=no".format(runstr) f.write("export KMP_AFFINITY=disabled\n") f.write("\n") else: runstr = "{} --cpu_bind=cores".format(runstr) f.write("run=\"{} -n ${{procs}} -N ${{nodes}} -c ${{node_depth}}\"\n\n".format(runstr)) f.write("now=`date +%Y%m%d-%H:%M:%S`\n") f.write("echo \"job datestamp = ${now}\"\n") f.write("log={}_${{now}}.log\n\n".format(logroot)) f.write("envlog={}_${{now}}.env\n".format(logroot)) f.write("env > ${envlog}\n\n") for com in commands: comlist = com.split(" ") executable = comlist.pop(0) f.write("ex=`which {}`\n".format(executable)) f.write("app=\"${ex}.app\"\n") f.write("if [ -x ${app} ]; then\n") f.write(" if [ ${ex} -nt ${app} ]; then\n") f.write(" app=${ex}\n") f.write(" fi\n") f.write("else\n") f.write(" app=${ex}\n") f.write("fi\n") f.write("echo calling {} at `date`\n\n".format(executable)) f.write("export STARTTIME=`date +%Y%m%d-%H:%M:%S`\n") f.write("echo ${{run}} ${{app}} {}\n".format(" ".join(comlist))) f.write("time ${{run}} ${{app}} {} >>${{log}} 2>&1".format(" ".join(comlist))) if multisrun: f.write(" &") f.write("\n\n") if multisrun: f.write("wait\n\n") f.write("echo done with slurm script at `date`\n") return def nersc_shifter_job(host, path, img, specdata, specredux, desiroot, logroot, desisetup, commands, nodes=1, \ nodeproc=1, minutes=10, multisrun=False, openmp=False, multiproc=False, \ queue="debug", jobname="desipipe"): hours = int(minutes/60) fullmin = int(minutes - 60*hours) timestr = "{:02d}:{:02d}:00".format(hours, fullmin) totalnodes = nodes if multisrun: # we are running every command as a separate srun # and backgrounding them. In this case, the nodes # given are per command, so we need to compute the # total. 
totalnodes = nodes * len(commands) with open(path, "w") as f: f.write("#!/bin/bash -l\n\n") f.write("#SBATCH --image={}\n".format(img)) if queue == "debug": f.write("#SBATCH --partition=debug\n") else: f.write("#SBATCH --partition=regular\n") if host == "cori": f.write("#SBATCH --constraint=haswell\n") elif host == "coriknl": f.write("#SBATCH --constraint=knl,quad,cache\n") f.write("#SBATCH --core-spec=4\n") f.write("#SBATCH --account=desi\n") f.write("#SBATCH --nodes={}\n".format(totalnodes)) f.write("#SBATCH --time={}\n".format(timestr)) f.write("#SBATCH --job-name={}\n".format(jobname)) f.write("#SBATCH --output={}_%j.log\n".format(logroot)) f.write("#SBATCH --volume=\"{}:/desi/root;{}:/desi/spectro_data;{}:/desi/spectro_redux\"\n\n".format(desiroot, specdata, specredux)) f.write("echo Starting slurm script at `date`\n\n") f.write("source {}\n\n".format(desisetup)) f.write("# Set TMPDIR to be on the ramdisk\n") f.write("export TMPDIR=/dev/shm\n\n") if host == "edison": f.write("cpu_per_core=2\n\n") f.write("node_cores=24\n\n") elif host == "cori": f.write("cpu_per_core=2\n\n") f.write("node_cores=32\n\n") elif host == "coriknl": f.write("cpu_per_core=4\n\n") f.write("node_cores=64\n\n") else: raise RuntimeError("Unsupported NERSC host") f.write("nodes={}\n".format(nodes)) f.write("node_proc={}\n".format(nodeproc)) f.write("node_thread=$(( node_cores / node_proc ))\n") f.write("node_depth=$(( cpu_per_core * node_thread ))\n") f.write("procs=$(( nodes * node_proc ))\n\n") if openmp: f.write("export OMP_NUM_THREADS=${node_thread}\n") f.write("export OMP_PLACES=threads\n") f.write("export OMP_PROC_BIND=spread\n") else: f.write("export OMP_NUM_THREADS=1\n") f.write("\n") runstr = "srun"
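# --- Illustrative helper (hypothetical; mirrors the bash arithmetic written into the
# generated job scripts above for the Cori-Haswell case, where node_cores=32 and
# cpu_per_core=2) ------------------------------------------------------------------------
def srun_geometry(nodes, nodeproc, node_cores=32, cpu_per_core=2):
    """Return (procs, node_depth) used for `srun -n ${procs} -N ${nodes} -c ${node_depth}`."""
    node_thread = node_cores // nodeproc        # physical cores per MPI rank
    node_depth = cpu_per_core * node_thread     # hardware threads reserved per rank (-c)
    procs = nodes * nodeproc                    # total MPI ranks (-n)
    return procs, node_depth

procs, depth = srun_geometry(nodes=4, nodeproc=8)
print("srun -n {} -N {} -c {}".format(procs, 4, depth))     # srun -n 32 -N 4 -c 8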
<filename>test/test_runtime.py """Tests for Runtime class.""" # pylint: disable=protected-access import logging import os import pathlib import subprocess from contextlib import contextmanager from shutil import rmtree from typing import Any, Iterator, List, Type, Union import pytest from _pytest.monkeypatch import MonkeyPatch from flaky import flaky from packaging.version import Version from pytest_mock import MockerFixture from ansible_compat.constants import INVALID_PREREQUISITES_RC from ansible_compat.errors import ( AnsibleCommandError, AnsibleCompatError, InvalidPrerequisiteError, ) from ansible_compat.runtime import CompletedProcess, Runtime def test_runtime_version(runtime: Runtime) -> None: """Tests version property.""" version = runtime.version assert isinstance(version, Version) # tests that caching property value worked (coverage) assert version == runtime.version @pytest.mark.parametrize( "require_module", (True, False), ids=("module-required", "module-unrequired"), ) def test_runtime_version_outdated(require_module: bool) -> None: """Checks that instantiation raises if version is outdated.""" with pytest.raises(RuntimeError, match="Found incompatible version of ansible"): Runtime(min_required_version="9999.9.9", require_module=require_module) def test_runtime_missing_ansible_module(monkeypatch: MonkeyPatch) -> None: """Checks that we produce a RuntimeError when ansible module is missing.""" class RaiseException: """Class to raise an exception.""" def __init__(self, *args: Any, **kwargs: Any) -> None: raise ModuleNotFoundError() monkeypatch.setattr("importlib.import_module", RaiseException) with pytest.raises(RuntimeError, match="Unable to find Ansible python module."): Runtime(require_module=True) def test_runtime_mismatch_ansible_module(monkeypatch: MonkeyPatch) -> None: """Test that missing module is detected.""" monkeypatch.setattr("ansible.release.__version__", "0.0.0", raising=False) with pytest.raises(RuntimeError, match="versions do not match"): Runtime(require_module=True) def test_runtime_require_module() -> None: """Check that require_module successful pass.""" Runtime(require_module=True) def test_runtime_version_fail_module(mocker: MockerFixture) -> None: """Tests for failure to detect Ansible version.""" patched = mocker.patch( "ansible_compat.runtime.parse_ansible_version", autospec=True, ) patched.side_effect = InvalidPrerequisiteError( "Unable to parse ansible cli version" ) runtime = Runtime() with pytest.raises( InvalidPrerequisiteError, match="Unable to parse ansible cli version" ): runtime.version # pylint: disable=pointless-statement def test_runtime_version_fail_cli(mocker: MockerFixture) -> None: """Tests for failure to detect Ansible version.""" mocker.patch( "ansible_compat.runtime.Runtime.exec", return_value=CompletedProcess( ["x"], returncode=123, stdout="oops", stderr="some error" ), autospec=True, ) runtime = Runtime() with pytest.raises( RuntimeError, match="Unable to find a working copy of ansible executable." 
): runtime.version # pylint: disable=pointless-statement def test_runtime_prepare_ansible_paths_validation() -> None: """Check that we validate collection_path.""" runtime = Runtime() runtime.config.collections_paths = "invalid-value" # type: ignore with pytest.raises(RuntimeError, match="Unexpected ansible configuration"): runtime._prepare_ansible_paths() @pytest.mark.parametrize( ("folder", "role_name", "isolated"), ( ("ansible-role-sample", "acme.sample", True), ("acme.sample2", "acme.sample2", True), ("sample3", "acme.sample3", True), ("sample4", "acme.sample4", False), ), ids=("1", "2", "3", "4"), ) def test_runtime_install_role( caplog: pytest.LogCaptureFixture, folder: str, role_name: str, isolated: bool, ) -> None: """Checks that we can install roles.""" caplog.set_level(logging.INFO) project_dir = os.path.join(os.path.dirname(__file__), "roles", folder) runtime = Runtime(isolated=isolated, project_dir=project_dir) runtime.prepare_environment(install_local=True) # check that role appears as installed now result = runtime.exec(["ansible-galaxy", "list"]) assert result.returncode == 0, result assert role_name in result.stdout runtime.clean() # also test that clean does not break when cache_dir is missing tmp_dir = runtime.cache_dir runtime.cache_dir = None runtime.clean() runtime.cache_dir = tmp_dir def test_prepare_environment_with_collections(tmp_path: pathlib.Path) -> None: """Check that collections are correctly installed.""" runtime = Runtime(isolated=True, project_dir=str(tmp_path)) runtime.prepare_environment(required_collections={"community.molecule": "0.1.0"}) def test_runtime_install_requirements_missing_file() -> None: """Check that missing requirements file is ignored.""" # Do not rely on this behavior, it may be removed in the future runtime = Runtime() runtime.install_requirements("/that/does/not/exist") @pytest.mark.parametrize( ("file", "exc", "msg"), ( ( "/dev/null", InvalidPrerequisiteError, "file is not a valid Ansible requirements file", ), ( os.path.join( os.path.dirname(__file__), "assets", "requirements-invalid-collection.yml", ), AnsibleCommandError, "Got 1 exit code while running: ansible-galaxy", ), ( os.path.join( os.path.dirname(__file__), "assets", "requirements-invalid-role.yml", ), AnsibleCommandError, "Got 1 exit code while running: ansible-galaxy", ), ), ids=("empty", "invalid-collection", "invalid-role"), ) def test_runtime_install_requirements_invalid_file( file: str, exc: Type[Any], msg: str ) -> None: """Check that invalid requirements file is raising.""" runtime = Runtime() with pytest.raises( exc, match=msg, ): runtime.install_requirements(file) @contextmanager def remember_cwd(cwd: str) -> Iterator[None]: """Context manager for chdir.""" curdir = os.getcwd() try: os.chdir(cwd) yield finally: os.chdir(curdir) # # https://github.com/box/flaky/issues/170 @flaky(max_runs=3) # type: ignore def test_prerun_reqs_v1(caplog: pytest.LogCaptureFixture, runtime: Runtime) -> None: """Checks that the linter can auto-install requirements v1 when found.""" cwd = os.path.realpath( os.path.join( os.path.dirname(os.path.realpath(__file__)), "..", "examples", "reqs_v1" ) ) with remember_cwd(cwd): with caplog.at_level(logging.INFO): runtime.prepare_environment() assert any( msg.startswith("Running ansible-galaxy role install") for msg in caplog.messages ) assert all( "Running ansible-galaxy collection install" not in msg for msg in caplog.messages ) @flaky(max_runs=3) # type: ignore def test_prerun_reqs_v2(caplog: pytest.LogCaptureFixture, runtime: Runtime) -> 
None: """Checks that the linter can auto-install requirements v2 when found.""" cwd = os.path.realpath( os.path.join( os.path.dirname(os.path.realpath(__file__)), "..", "examples", "reqs_v2" ) ) with remember_cwd(cwd): with caplog.at_level(logging.INFO): runtime.prepare_environment() assert any( msg.startswith("Running ansible-galaxy role install") for msg in caplog.messages ) assert any( msg.startswith("Running ansible-galaxy collection install") for msg in caplog.messages ) def test__update_env_no_old_value_no_default_no_value(monkeypatch: MonkeyPatch) -> None: """Make sure empty value does not touch environment.""" monkeypatch.delenv("DUMMY_VAR", raising=False) runtime = Runtime() runtime._update_env("DUMMY_VAR", []) assert "DUMMY_VAR" not in runtime.environ def test__update_env_no_old_value_no_value(monkeypatch: MonkeyPatch) -> None: """Make sure empty value does not touch environment.""" monkeypatch.delenv("DUMMY_VAR", raising=False) runtime = Runtime() runtime._update_env("DUMMY_VAR", [], "a:b") assert "DUMMY_VAR" not in runtime.environ def test__update_env_no_default_no_value(monkeypatch: MonkeyPatch) -> None: """Make sure empty value does not touch environment.""" monkeypatch.setenv("DUMMY_VAR", "a:b") runtime = Runtime() runtime._update_env("DUMMY_VAR", []) assert runtime.environ["DUMMY_VAR"] == "a:b" @pytest.mark.parametrize( ("value", "result"), ( (["a"], "a"), (["a", "b"], "a:b"), (["a", "b", "c"], "a:b:c"), ), ) def test__update_env_no_old_value_no_default( monkeypatch: MonkeyPatch, value: List[str], result: str ) -> None: """Values are concatenated using : as the separator.""" monkeypatch.delenv("DUMMY_VAR", raising=False) runtime = Runtime() runtime._update_env("DUMMY_VAR", value) assert runtime.environ["DUMMY_VAR"] == result @pytest.mark.parametrize( ("default", "value", "result"), ( ("a:b", ["c"], "c:a:b"), ("a:b", ["c:d"], "c:d:a:b"), ), ) def test__update_env_no_old_value( monkeypatch: MonkeyPatch, default: str, value: List[str], result: str ) -> None: """Values are appended to default value.""" monkeypatch.delenv("DUMMY_VAR", raising=False) runtime = Runtime() runtime._update_env("DUMMY_VAR", value, default) assert runtime.environ["DUMMY_VAR"] == result @pytest.mark.parametrize( ("old_value", "value", "result"), ( ("a:b", ["c"], "c:a:b"), ("a:b", ["c:d"], "c:d:a:b"), ), ) def test__update_env_no_default( monkeypatch: MonkeyPatch, old_value: str, value: List[str], result: str ) -> None: """Values are appended to preexisting value.""" monkeypatch.setenv("DUMMY_VAR", old_value) runtime = Runtime() runtime._update_env("DUMMY_VAR", value) assert runtime.environ["DUMMY_VAR"] == result @pytest.mark.parametrize( ("old_value", "default", "value", "result"), ( ("", "", ["e"], "e"), ("a", "", ["e"], "e:a"), ("", "c", ["e"], "e"), ("a", "c", ["e:f"], "e:f:a"), ), ) def test__update_env( monkeypatch: MonkeyPatch, old_value: str, default: str, # pylint: disable=unused-argument value: List[str], result: str, ) -> None: """Defaults are ignored when preexisting value is present.""" monkeypatch.setenv("DUMMY_VAR", old_value) runtime = Runtime() runtime._update_env("DUMMY_VAR", value) assert runtime.environ["DUMMY_VAR"] == result def test_require_collection_wrong_version(runtime: Runtime) -> None: """Tests behaviour of require_collection.""" subprocess.check_output( [ "ansible-galaxy", "collection", "install", "containers.podman", "-p", "~/.ansible/collections", ] ) with pytest.raises(InvalidPrerequisiteError) as pytest_wrapped_e: runtime.require_collection("containers.podman", 
"9999.9.9") assert pytest_wrapped_e.type == InvalidPrerequisiteError assert pytest_wrapped_e.value.code == INVALID_PREREQUISITES_RC def test_require_collection_invalid_name(runtime: Runtime) -> None: """Check that require_collection raise with invalid collection name.""" with pytest.raises( InvalidPrerequisiteError, match="Invalid collection name supplied:" ): runtime.require_collection("that-is-invalid") def test_require_collection_invalid_collections_path(runtime: Runtime) -> None: """Check that require_collection raise with invalid collections path.""" runtime.config.collections_paths = "/that/is/invalid" # type: ignore with pytest.raises( InvalidPrerequisiteError, match="Unable to determine ansible collection paths" ): runtime.require_collection("community.molecule") def test_require_collection_preexisting_broken(tmp_path: pathlib.Path) -> None: """Check that require_collection raise with broken pre-existing collection.""" runtime = Runtime(isolated=True, project_dir=str(tmp_path)) dest_path: str = runtime.config.collections_paths[0] dest = os.path.join(dest_path, "ansible_collections", "foo", "bar") os.makedirs(dest, exist_ok=True) with pytest.raises(InvalidPrerequisiteError, match="missing MANIFEST.json"): runtime.require_collection("foo.bar") def test_require_collection(runtime_tmp: Runtime) -> None: """Check that require collection successful install case.""" runtime_tmp.require_collection("community.molecule", "0.1.0") @pytest.mark.parametrize( ("name", "version", "install"), ( ("fake_namespace.fake_name", None, True), ("fake_namespace.fake_name", "9999.9.9", True), ("fake_namespace.fake_name", None, False), ), ids=("a", "b", "c"), ) def test_require_collection_missing( name: str, version: str, install: bool, runtime: Runtime ) -> None: """Tests behaviour of require_collection, missing case.""" with pytest.raises(AnsibleCompatError) as pytest_wrapped_e: runtime.require_collection(name=name, version=version, install=install) assert pytest_wrapped_e.type == InvalidPrerequisiteError assert pytest_wrapped_e.value.code == INVALID_PREREQUISITES_RC def test_install_collection(runtime: Runtime) -> None: """Check that valid collection installs do not fail.""" runtime.install_collection("containers.podman:>=1.0") def test_install_collection_dest(runtime: Runtime, tmp_path: pathlib.Path) -> None: """Check that valid collection to custom destination passes.""" runtime.install_collection("containers.podman:>=1.0", destination=tmp_path) expected_file = ( tmp_path / "ansible_collections" / "containers" / "podman" / "MANIFEST.json" ) assert expected_file.is_file() def test_install_collection_fail(runtime: Runtime) -> None: """Check that invalid collection install fails.""" with pytest.raises(AnsibleCompatError) as pytest_wrapped_e: runtime.install_collection("containers.podman:>=9999.0") assert pytest_wrapped_e.type == InvalidPrerequisiteError assert pytest_wrapped_e.value.code == INVALID_PREREQUISITES_RC def test_install_galaxy_role(runtime_tmp: Runtime) -> None: """Check install role with empty galaxy file.""" pathlib.Path(f"{runtime_tmp.project_dir}/galaxy.yml").touch() pathlib.Path(f"{runtime_tmp.project_dir}/meta").mkdir() pathlib.Path(f"{runtime_tmp.project_dir}/meta/main.yml").touch() # this should only raise a warning runtime_tmp._install_galaxy_role(runtime_tmp.project_dir, role_name_check=1) # this shoul test the bypass role name check path runtime_tmp._install_galaxy_role(runtime_tmp.project_dir, role_name_check=2) # this should raise an error with pytest.raises( 
InvalidPrerequisiteError, match="does not follow current galaxy requirements" ): runtime_tmp._install_galaxy_role(runtime_tmp.project_dir, role_name_check=0) def test_install_galaxy_role_unlink( runtime_tmp: Runtime, caplog: pytest.LogCaptureFixture ) -> None: """Test ability to unlink incorrect symlinked roles.""" caplog.set_level(logging.INFO) runtime_tmp.prepare_environment() pathlib.Path(f"{runtime_tmp.cache_dir}/roles").mkdir(parents=True, exist_ok=True) pathlib.Path(f"{runtime_tmp.cache_dir}/roles/acme.get_rich").symlink_to("/dev/null") pathlib.Path(f"{runtime_tmp.project_dir}/meta").mkdir() pathlib.Path(f"{runtime_tmp.project_dir}/meta/main.yml").write_text( """galaxy_info: role_name: get_rich namespace: acme """, encoding="utf-8", ) runtime_tmp._install_galaxy_role(runtime_tmp.project_dir) assert "symlink to current repository" in caplog.text def test_install_galaxy_role_bad_namespace(runtime_tmp: Runtime) -> None: """Check install role with bad namespace in galaxy info.""" # pathlib.Path(f'{runtime_tmp.project_dir}/galaxy.yml').touch() pathlib.Path(f"{runtime_tmp.project_dir}/meta").mkdir() pathlib.Path(f"{runtime_tmp.project_dir}/meta/main.yml").write_text( """galaxy_info: role_name: foo author: bar namespace: ["xxx"] """ ) # this should raise an error regardless the role_name_check value with pytest.raises(AnsibleCompatError, match="Role namespace must be string, not"): runtime_tmp._install_galaxy_role(runtime_tmp.project_dir, role_name_check=1) def test_install_galaxy_role_no_checks(runtime_tmp: Runtime) -> None: """Check install role with bad namespace in galaxy info.""" runtime_tmp.prepare_environment() pathlib.Path(f"{runtime_tmp.project_dir}/meta").mkdir() pathlib.Path(f"{runtime_tmp.project_dir}/meta/main.yml").write_text( """galaxy_info: role_name: foo author: bar namespace: acme """ ) runtime_tmp._install_galaxy_role(runtime_tmp.project_dir, role_name_check=2) result
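# --- Illustrative implementation (not the real ansible_compat.runtime code) -------------
# The parametrized _update_env tests above pin down a contract: an empty value list is a
# no-op, new values are prepended, and a preexisting variable wins over the default.  One
# minimal implementation consistent with those tests:
import os
from typing import Dict, List

class MiniRuntime:
    """Toy stand-in for Runtime, only to show one behaviour consistent with the tests."""

    def __init__(self) -> None:
        self.environ: Dict[str, str] = dict(os.environ)

    def _update_env(self, varname: str, value: List[str], default: str = "") -> None:
        if not value:                                   # empty list: leave env untouched
            return
        orig = self.environ.get(varname, default)       # preexisting value beats default
        parts = list(value) + ([orig] if orig else [])
        self.environ[varname] = ":".join(parts)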
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You may not # use this file except in compliance with the License. A copy of the License # is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. """ DataWig SimpleImputer: Uses some simple default encoders and featurizers that usually yield decent imputation quality """ import glob import inspect import json import os import pickle import shutil from typing import List, Dict, Any, Callable import mxnet as mx import pandas as pd import numpy as np from pandas.api.types import is_numeric_dtype from ._hpo import _HPO from .column_encoders import BowEncoder, CategoricalEncoder, NumericalEncoder, TfIdfEncoder from .imputer import Imputer from .mxnet_input_symbols import BowFeaturizer, NumericalFeaturizer from .utils import logger, get_context, random_split, rand_string, flatten_dict, merge_dicts, set_stream_log_level from .imputer import Imputer from .iterators import INSTANCE_WEIGHT_COLUMN class SimpleImputer: """ SimpleImputer model based on n-grams of concatenated strings of input columns and concatenated numerical features, if provided. Given a data frame with string columns, a model is trained to predict observed values in label column using values observed in other columns. The model can then be used to impute missing values. :param input_columns: list of input column names (as strings) :param output_column: output column name (as string) :param output_path: path to store model and metrics :param num_hash_buckets: number of hash buckets used for the n-gram hashing vectorizer, only used for non-numerical input columns, ignored otherwise :param num_labels: number of imputable values considered after, only used for non-numerical input columns, ignored otherwise :param tokens: string, 'chars' or 'words' (default 'chars'), determines tokenization strategy for n-grams, only used for non-numerical input columns, ignored otherwise :param numeric_latent_dim: int, number of latent dimensions for hidden layer of NumericalFeaturizers; only used for numerical input columns, ignored otherwise :param numeric_hidden_layers: number of numeric hidden layers :param is_explainable: if this is True, a stateful tf-idf encoder is used that allows explaining classes and single instances Example usage: from datawig.simple_imputer import SimpleImputer import pandas as pd fn_train = os.path.join(datawig_test_path, "resources", "shoes", "train.csv.gz") fn_test = os.path.join(datawig_test_path, "resources", "shoes", "test.csv.gz") df_train = pd.read_csv(training_data_files) df_test = pd.read_csv(testing_data_files) output_path = "imputer_model" # set up imputer model imputer = SimpleImputer( input_columns=['item_name', 'bullet_point'], output_column='brand') # train the imputer model imputer = imputer.fit(df_train) # obtain imputations imputations = imputer.predict(df_test) """ def __init__(self, input_columns: List[str], output_column: str, output_path: str = "", num_hash_buckets: int = int(2 ** 15), num_labels: int = 100, tokens: str = 'chars', numeric_latent_dim: int = 100, numeric_hidden_layers: int = 1, is_explainable: bool = False) -> None: for col in input_columns: if not isinstance(col, str): raise 
ValueError("SimpleImputer.input_columns must be str type, was {}".format(type(col))) if not isinstance(output_column, str): raise ValueError("SimpleImputer.output_column must be str type, was {}".format(type(output_column))) self.input_columns = input_columns self.output_column = output_column self.num_hash_buckets = num_hash_buckets self.num_labels = num_labels self.tokens = tokens self.numeric_latent_dim = numeric_latent_dim self.numeric_hidden_layers = numeric_hidden_layers self.output_path = output_path self.imputer = None self.hpo = None self.numeric_columns = [] self.string_columns = [] self.hpo = _HPO() self.is_explainable = is_explainable def check_data_types(self, data_frame: pd.DataFrame) -> None: """ Checks whether a column contains string or numeric data :param data_frame: :return: """ self.numeric_columns = [c for c in self.input_columns if is_numeric_dtype(data_frame[c])] self.string_columns = list(set(self.input_columns) - set(self.numeric_columns)) self.output_type = 'numeric' if is_numeric_dtype(data_frame[self.output_column]) else 'string' logger.debug( "Assuming {} numeric input columns: {}".format(len(self.numeric_columns), ", ".join(self.numeric_columns))) logger.debug("Assuming {} string input columns: {}".format(len(self.string_columns), ", ".join(self.string_columns))) @staticmethod def _is_categorical(col: pd.Series, n_samples: int = 100, max_unique_fraction=0.05) -> bool: """ A heuristic to check whether a column is categorical: a column is considered categorical (as opposed to a plain text column) if the relative cardinality is max_unique_fraction or less. :param col: pandas Series containing strings :param n_samples: number of samples used for heuristic (default: 100) :param max_unique_fraction: maximum relative cardinality. :return: True if the column is categorical according to the heuristic """ sample = col.sample(n=n_samples, replace=len(col) < n_samples).unique() return sample.shape[0] / n_samples < max_unique_fraction def fit_hpo(self, train_df: pd.DataFrame, test_df: pd.DataFrame = None, hps: dict = None, num_evals: int = 10, max_running_hours: float = 96.0, hpo_run_name: str = None, user_defined_scores: list = None, num_epochs: int = None, patience: int = None, test_split: float = .2, weight_decay: List[float] = None, batch_size: int = 16, num_hash_bucket_candidates: List[float] = [2 ** exp for exp in [12, 15, 18]], tokens_candidates: List[str] = ['words', 'chars'], numeric_latent_dim_candidates: List[int] = None, numeric_hidden_layers_candidates: List[int] = None, final_fc_hidden_units: List[List[int]] = None, learning_rate_candidates: List[float] = None, normalize_numeric: bool = True, hpo_max_train_samples: int = None, ctx: mx.context = get_context()) -> Any: """ Fits an imputer model with hyperparameter optimization. The parameter ranges are searched randomly. Grids are specified using the *_candidates arguments (old) or with more flexibility via the dictionary hps. :param train_df: training data as dataframe :param test_df: test data as dataframe; if not provided, a ratio of test_split of the training data are used as test data :param hps: nested dictionary where hps[global][parameter_name] is list of parameters. Similarly, hps[column_name][parameter_name] is a list of parameter values for each input column. Further, hps[column_name]['type'] is in ['numeric', 'categorical', 'string'] and is inferred if not provided. :param num_evals: number of evaluations for random search :param max_running_hours: Time before the hpo run is terminated in hours. 
        :param hpo_run_name: string to identify the current hpo run.
        :param user_defined_scores: list with entries (Callable, str), where callable is a
            function accepting **kwargs true, predicted, confidence. Allows custom scoring
            functions.

        Below are parameters of the old implementation, kept to ascertain backwards
        compatibility.

        :param num_epochs: maximal number of training epochs (default 10)
        :param patience: used for early stopping; after [patience] epochs with no
            improvement, training is stopped. (default 3)
        :param test_split: if no test_df is provided this is the ratio of test data to be
            held separate for determining model convergence
        :param weight_decay: regularizer (default 0)
        :param batch_size: batch size (default 16)
        :param num_hash_bucket_candidates: candidates for gridsearch hyperparameter
            optimization (default [2**10, 2**13, 2**15, 2**18, 2**20])
        :param tokens_candidates: candidates for tokenization (default ['words', 'chars'])
        :param numeric_latent_dim_candidates: candidates for latent dimensionality of
            numerical features (default [10, 50, 100])
        :param numeric_hidden_layers_candidates: candidates for number of hidden layers of
            numerical features (default [0, 1, 2])
        :param final_fc_hidden_units: list of lists w/ dimensions for FC layers after the
            final concatenation (NOTE: for HPO, this expects a list of lists)
        :param learning_rate_candidates: candidates for learning rate
            (default [1e-1, 1e-2, 1e-3])
        :param normalize_numeric: boolean indicating whether or not to normalize numeric
            values
        :param hpo_max_train_samples: training set size for hyperparameter optimization.
            Use is deprecated.
        :param ctx: List of mxnet contexts (if no GPUs are available, defaults to
            [mx.cpu()]). User can also pass in a list of GPUs to be used, e.g.
            [mx.gpu(0), mx.gpu(2), mx.gpu(4)]. This parameter is deprecated.
        :return: pd.DataFrame with hyper-parameter configurations and results
        """

        # generate dictionary with default hyperparameter settings. Overwrite these defaults
        # with configurations that were passed via this function's API wherever applicable.
default_hps = dict() default_hps['global'] = dict() if learning_rate_candidates: default_hps['global']['learning_rate'] = learning_rate_candidates if weight_decay: default_hps['global']['weight_decay'] = weight_decay if num_epochs: default_hps['global']['num_epochs'] = [num_epochs] if patience: default_hps['global']['patience'] = [patience] if batch_size: default_hps['global']['batch_size'] = [batch_size] if final_fc_hidden_units: default_hps['global']['final_fc_hidden_units'] = final_fc_hidden_units default_hps['string'] = {} if num_hash_bucket_candidates: default_hps['string']['max_tokens'] = num_hash_bucket_candidates if tokens_candidates: default_hps['string']['tokens'] = [[c] for c in tokens_candidates] default_hps['categorical'] = {} if num_hash_bucket_candidates: default_hps['categorical']['max_tokens'] = num_hash_bucket_candidates default_hps['numeric'] = {} if normalize_numeric: default_hps['numeric']['normalize'] = [normalize_numeric] if numeric_latent_dim_candidates: default_hps['numeric']['numeric_latent_dim'] = numeric_latent_dim_candidates if numeric_hidden_layers_candidates: default_hps['numeric']['numeric_hidden_layers'] = numeric_hidden_layers_candidates if hps is None: hps = {} # give parameters in `hps` precedence over default parameters parameters_in_both = set(default_hps.keys()).intersection(set(hps.keys())) for param in parameters_in_both: del default_hps[param] hps = merge_dicts(hps, default_hps) if user_defined_scores is None: user_defined_scores = [] if test_df is None: train_df, test_df = random_split(train_df, [1-test_split, test_split]) self.check_data_types(train_df) # infer data types, saved self.string_columns, self.numeric_columns self.hpo.tune(train_df, test_df, hps, num_evals, max_running_hours, user_defined_scores, hpo_run_name, self) self.save() return self def fit(self, train_df: pd.DataFrame, test_df: pd.DataFrame = None, ctx: mx.context = get_context(), learning_rate:
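# --- Illustrative hyper-parameter grid (column names 'title' and 'price' are made up) ---
# fit_hpo() accepts a nested dict: hps['global'][param] lists global candidates, while
# hps[column_name][param] lists per-column candidates; the column type is inferred from
# the dataframe when not given (see the docstring above).
hps = {
    "global": {
        "learning_rate": [1e-3, 4e-3],
        "num_epochs": [10],
        "batch_size": [16],
    },
    "title": {                                   # hypothetical string input column
        "max_tokens": [2 ** 15, 2 ** 18],
        "tokens": [["chars"], ["words"]],
    },
    "price": {                                   # hypothetical numeric input column
        "normalize": [True],
        "numeric_latent_dim": [10, 50],
    },
}
# imputer = SimpleImputer(input_columns=["title", "price"], output_column="brand")
# imputer.fit_hpo(df_train, hps=hps, num_evals=5)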
m2/TEU (based on grasshopper layout P. Koster) "pavement": 200, # m2 DUMMY "drainage": 50, # m2 DUMMY "household": 0.1, # moves "digout_margin": 1.2, # percentage "reefer_factor": 2.33, # RHDHV "consumption": 4, # kWh per active reefer "reefer_rack": 3500, # USD "reefers_present": 0.5} # per reefer spot # *** Default inputs: Other_Stack class empty_stack_data = {"name": 'Empty Stack', "ownership": 'Terminal operator', "delivery_time": 1, "lifespan": 40, "mobilisation": 25_000, "maintenance_perc": 0.1, "width": 8, # TEU "height": 6, # TEU "length": 10, # TEU "capacity": 480, # TEU "gross_tgs": 18, # TEU Ground Slot "area_factor": 2.04, # Based on grasshopper layout "pavement": 200, # DUMMY "drainage": 50, "household": 1.05, "digout": 1.05} # DUMMY oog_stack_data = {"name": 'OOG Stack', "ownership": 'Terminal operator', "delivery_time": 1, "lifespan": 40, "mobilisation": 25_000, "maintenance_perc": 0.1, "width": 10, # TEU "height": 1, # TEU "length": 10, # TEU "capacity": 100, # TEU "gross_tgs": 64, # TEU Ground Slot "area_factor": 1.05, # m2/TEU (based on grasshopper layout P. Koster) "pavement": 200, # DUMMY "drainage": 50} # DUMMY # *** Default inputs: Stack_Equipment class rtg_data = {"name": 'RTG', "type": 'rtg', "ownership": 'Terminal operator', "delivery_time": 0, "lifespan": 10, "unit_rate": 1_400_000, "mobilisation": 5000, "maintenance_perc": 0.1, # dummy "insurance_perc": 0, "crew": 1, # dummy "salary": 50_000, # dummy "required": 3, "fuel_consumption": 1, # dummy "power_consumption": 0 } rmg_data = {"name": 'RMG', "type": 'rmg', "ownership": 'Terminal operator', "delivery_time": 0, "lifespan": 10, "unit_rate": 2_500_000, "mobilisation": 5000, "maintenance_perc": 0.1, # dummy "insurance_perc": 0, "crew": 0, # dummy "salary": 50_000, # dummy "required": 1, # one per stack "fuel_consumption": 0, # dummy "power_consumption": 15 # kWh/box move } sc_data = {"name": 'Straddle carrier', "type": 'sc', "ownership": 'Terminal operator', "delivery_time": 0, "lifespan": 10, "unit_rate": 2_000_000, # dummy "mobilisation": 5000, "maintenance_perc": 0.1, # dummy "insurance_perc": 0, "crew": 0, # dummy "salary": 50_000, # dummy "required": 5, "fuel_consumption": 0, # dummy "power_consumption": 30 } rs_data = {"name": 'Reach stacker', "type": 'rs', "ownership": 'Terminal operator', "delivery_time": 0, "lifespan": 10, "unit_rate": 500_000, "mobilisation": 5000, "maintenance_perc": 0.1, # dummy "insurance_perc": 0, "crew": 2, # dummy "salary": 50_000, # dummy "required": 4, "fuel_consumption": 1, # dummy "power_consumption": 0 } # *** Default inputs: Gate class *** gate_data = {"name": 'Gate', "type": 'gate', "ownership": "Terminal operator", "delivery_time": 1, # years "lifespan": 15, # years "unit_rate": 30_000, # USD/gate "mobilisation": 5000, # USD/gate "maintenance_perc": 0.02, "crew": 2, # crew "salary": 30_000, # Dummy "canopy_costs": 250, # USD/m2 # Dummy "area": 288.75, # PIANC WG135 "staff_gates": 1, # "service_gates": 1, # "design_capacity": 0.98, # "exit_inspection_time": 3, # min #dummy "entry_inspection_time": 2, # min #dummy "peak_hour": 0.125, # dummy "peak_day": 0.25, # dummy "peak_factor": 1.2, "truck_moves": 0.75, "operating_days": 7, "capacity": 60} # *** Default inputs: ECH class*** empty_handler_data = {"name": 'Empty Handler', "type": 'empty_handler', "ownership": "Terminal operator", "delivery_time": 1, "lifespan": 15, "unit_rate": 500_000, "mobilisation": 5000, "maintenance_perc": 0.02, "crew": 1, "salary": 35_000, # dummy "fuel_consumption": 1.5, "required": 5} # *** Default 
inputs: Commodity class *** container_data = {"name": 'Laden', "handling_fee": 150, "fully_cellular_perc": 0, "panamax_perc": 0, "panamax_max_perc": 0, "post_panamax_I_perc": 0, "post_panamax_II_perc": 0, "new_panamax_perc": 100, "VLCS_perc": 0, "ULCS_perc": 0} # *** Default inputs: Vessel class *** (Source: i) The Geography of Transport Systems, <NAME> (2017), ii) UNCTAD) fully_cellular_data = {"name": 'Fully_Cellular_1', "type": 'Fully_Cellular', "delivery_time": 0, # years "call_size": 2500 / 8, # TEU "LOA": 215, # m "draught": 10.0, # m "beam": 20.0, # m "max_cranes": 4, # STS cranes "all_turn_time": 31, # todo source "mooring_time": 6, # berthing + deberthing time "demurrage_rate": 730, # USD todo edit "transport_costs": 200, # USD per TEU, RHDHV "all_in_transport_costs": 2128 # USD per TEU, Ports and Terminals p.158 } panamax_data = {"name": 'Panamax_1', "type": 'Panamax', "delivery_time": 0, # years "call_size": 3400 / 8, # TEU "LOA": 250, # m "draught": 12.5, # m "beam": 32.2, # m "max_cranes": 4, # STS cranes "all_turn_time": 31, # todo source [hr] "mooring_time": 6, # berthing + deberthing time [hr] "demurrage_rate": 730, # USD todo edit "transport_costs": 180, # USD per TEU, RHDHV "all_in_transport_costs": 1881 # USD per TEU, Ports and Terminals p.158 } panamax_max_data = {"name": 'Panamax_Max_1', "type": 'Panamax_Max', "delivery_time": 0, # years "call_size": 4500 / 8, # TEU "LOA": 290, # m "draught": 12.5, # m "beam": 32.0, # m "max_cranes": 4, # STS cranes "all_turn_time": 31, # todo source [hr] "mooring_time": 2, # berthing + deberthing time [hr] "demurrage_rate": 730, # USD todo edit "transport_costs": 160, # USD per TEU, RHDHV "all_in_transport_costs": 1682 # USD per TEU, Ports and Terminals p.158 } post_panamax_I_data = {"name": 'Post_Panamax_I_1', "type": 'Post_Panamax_I', "delivery_time": 0, # years "call_size": 6000 / 8, # TEU "LOA": 300, # m "draught": 13.0, # m "beam": 40.0, # m "max_cranes": 4, # STS cranes "all_turn_time": 31, # todo source [hr] "mooring_time": 2, # berthing + deberthing time [hr] "demurrage_rate": 730, # USD todo edit "transport_costs": 150, # USD per TEU, RHDHV "all_in_transport_costs": 1499 # USD per TEU, Ports and Terminals p.158 } post_panamax_II_data = {"name": 'Post_Panamax_II_1', "type": 'Post_Panamax_II', "delivery_time": 0, # years "call_size": 8500 / 8, # TEU "LOA": 340, # m "draught": 14.5, # m "beam": 43.0, # m "max_cranes": 4, # STS cranes "all_turn_time": 31, # todo source [hr] "mooring_time": 2, # berthing + deberthing time [hr] "demurrage_rate": 730, # USD todo edit "transport_costs": 140, # USD per TEU, RHDHV "all_in_transport_costs": 1304 # USD per TEU, Ports and Terminals p.158 } new_panamax_data = {"name": 'New_Panamax_1', "type": 'New_Panamax', "delivery_time": 0, # years "call_size": 12500 / 8, # TEU "LOA": 366, # m "draught": 15.2, # m "beam": 49.0, # m "max_cranes": 4, # STS cranes "all_turn_time": 31, # todo source [hr] "mooring_time": 6, # berthing + deberthing time [hr] "demurrage_rate": 730, # USD todo edit "transport_costs": 120, # USD per TEU, RHDHV "all_in_transport_costs": 1118 # USD per TEU, Ports and Terminals p.158 } VLCS_data = {"name": 'VLCS_1', "type": 'VLCS', "delivery_time": 0, # years "call_size": 15000 / 8, # TEU "LOA": 397, # m "draught": 15.5, # m "beam": 56.0, # m "max_cranes": 4, # STS cranes "all_turn_time": 31, # todo source [hr] "mooring_time": 4, # berthing + deberthing time [hr] "demurrage_rate": 730, # USD todo edit "transport_costs": 80, # USD per TEU, RHDHV "all_in_transport_costs": 2128 # USD 
per TEU, Ports and Terminals p.158 } ULCS_data = {"name": 'ULCS_1', "type": 'ULCS', "delivery_time": 0, # years "call_size": 21000 / 8, # TEU "LOA": 400, # m "draught": 16.0, # m "beam": 59.0, # m "max_cranes": 4, # STS cranes "all_turn_time": 31, # todo source [hr] "mooring_time": 4, # berthing + deberthing time [hr] "demurrage_rate": 730, # USD todo edit "transport_costs": 60, # USD per TEU, RHDHV "all_in_transport_costs": 908 # USD per TEU, Ports and Terminals p.158 } # *** Default inputs: Barge class *** # todo add sources small_barge_data = {"name": 'Small_Barge_1', "type": 'small', "ownership": 'Port authority', "delivery_time": 1, # years "lifespan": 10, # years "call_size": 200, # TEU "LOA": 90, # m "draught": 4.5, # m "beam": 12.0, # m "unit_rate": 1_000_000, # USD per barge "operations_perc": 0.10, "maintenance_perc": 0.10, "insurance_perc": 0.01, "mooring_time": 6, # berthing + deberthing time "transport_costs": 200} # USD per TEU medium_barge_data = {"name": 'Medium_Barge_1', "type": 'medium', "ownership": 'Port authority', "delivery_time": 1, # years "lifespan": 10, # years "call_size": 250, # TEU "LOA": 100, # m "draught": 5.0, # m "beam": 13.0, # m "unit_rate": 1_000_000, # USD per barge "operations_perc": 0.10, "maintenance_perc": 0.10, "insurance_perc": 0.01, "mooring_time": 6, # berthing + deberthing time "transport_costs": 200} # USD per TEU large_barge_data = {"name": 'Large_Barge_1', "type": 'large', "ownership": 'Port authority', "delivery_time": 1, # years "lifespan": 10, # years "call_size": 300, # TEU "LOA": 120, # m "draught": 5.5, # m "beam": 14.0, # m "unit_rate": 1_000_000, # USD per barge "operations_perc": 0.10, "maintenance_perc": 0.10, "insurance_perc": 0.01, "mooring_time": 6, # berthing + deberthing time "transport_costs": 200} # USD per TEU truck_data = {"name": 'Truck', "ownership": 'Port authority', "delivery_time": 1, "lifespan": 10, "unit_rate": 10_000, # USD per truck "operations_perc": 0.10, "maintenance_perc": 0.10, "insurance_perc": 0.01} # *** Default inputs: Labour class *** labour_data = {"name": 'Labour', "international_salary": 105_000, "international_staff": 4, "local_salary": 18_850, "local_staff": 10, "operational_salary": 16_750, "shift_length": 6.5,
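# --- Illustrative calculation (the formula is a rough sketch for this example, not taken
# from the model itself) ------------------------------------------------------------------
# Combining the Empty Stack defaults above: ground slots and footprint for an assumed
# storage demand of 4,800 TEU.
empty_stack = {"width": 8, "height": 6, "length": 10, "capacity": 480,
               "gross_tgs": 18, "area_factor": 2.04}

teu_demand = 4_800                                              # assumed demand [TEU]
stacks = -(-teu_demand // empty_stack["capacity"])              # ceiling division -> 10
ground_slots = stacks * empty_stack["width"] * empty_stack["length"]                # 800
footprint_m2 = ground_slots * empty_stack["gross_tgs"] * empty_stack["area_factor"]
print(stacks, ground_slots, round(footprint_m2))                # 10 800 29376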
line, 'max', get_text_content(hack_file_text_box)) hack_file_text_box.mark_set(tk.INSERT, new_cursor) highlight_stuff(widget, skip_moving_cursor=center, ctrl_held=ctrl_held) updateWindowScrollbarPos() def navigation_callback(address): widget = check_widget(window.focus_get()) if not address and widget: widget.focus_force() try: address = deci(address) if disasm.game_address_mode: address -= disasm.game_offset address >>= 2 except: if widget: widget.focus_force() return apply_hack_changes() apply_comment_changes() reset_target() navigate_to(address, center=True, widget=widget, region_treatment=True) if widget: widget.focus_force() def navigation_prompt(root=window): if not disassembler_loaded(): return address = simpledialog.askstring('Navigate to address', '', parent=root) if not address: return navigation_callback(address) def scroll_callback(event,numUnits=1): if not disassembler_loaded(): return apply_hack_changes() apply_comment_changes() direction = -app_config['scroll_amount'] if event.delta > 0 else app_config['scroll_amount'] navigate_to(navigation + direction * numUnits, widget=check_widget(window.focus_get()), ctrl_held=ctrl_on_press) def save_changes_to_file(save_as=False): if not disassembler_loaded(): return False apply_hack_changes() apply_comment_changes() # Do not save changes if there are errors for key in user_errors: navigate_to(int(key), widget=hack_file_text_box, center=True) return False if app_config['calc_crc'][disasm.hack_file_name]: status_text.set('Calculating checksum...') window.update() sum1, sum2 = disasm.calc_checksum() else: sum1 = sum2 = 0 if app_config['calc_crc'][disasm.hack_file_name] and navigation <= disasm.header_items['CRC2'][0] >> 2: navigate_to(navigation) if save_as: new_file_name = filedialog.asksaveasfilename(initialdir = app_config['previous_hack_location'], title = 'Save as...') if not new_file_name: return False new_file_path = os.path.realpath(new_file_name) if new_file_path == disasm.base_folder + disasm.base_file_name: simpledialog.messagebox._show('Wait a sec', 'You shouldn\'t select the base file') return False new_file_name = new_file_path[new_file_path.rfind('\\') + 1:] if not '.' 
in new_file_name: dot = disasm.hack_file_name.rfind('.') new_file_name += disasm.hack_file_name[dot:] new_dir = new_file_path[:new_file_path.rfind('\\') + 1] new_file_path = new_dir + new_file_name if exists(new_file_path): simpledialog.messagebox._show('Sorry', 'That file already exists.') return False app_config['previous_hack_location'] = new_dir app_config['previous_hack_opened'] = new_file_path app_config['hack_of_base'][new_file_name] = app_config['hack_of_base'][disasm.hack_file_name] app_config['calc_crc'][new_file_name] = app_config['calc_crc'][disasm.hack_file_name] app_config['memory_regions'][new_file_name] = app_config['memory_regions'][disasm.hack_file_name].copy() app_config['remember_batch'][new_file_name] = app_config['remember_batch'][disasm.hack_file_name] app_config['remember_script'][new_file_name] = app_config['remember_script'][disasm.hack_file_name] disasm.hack_file_name = new_file_name disasm.comments_file = new_file_path + ' comments.txt' disasm.jumps_file = new_file_path + ' jumps.data' window.title('ROM Disassembler - ' + disasm.hack_file_name) app_config['CIC'][disasm.hack_file_name] = disasm.cic app_config['jumps_displaying'][disasm.hack_file_name] = jumps_displaying.copy() app_config['game_address_mode'][disasm.hack_file_name] = disasm.game_address_mode save_config() with open(disasm.jumps_file, 'wb') as jumps_file: dump((disasm.jumps_to, disasm.branches_to, disasm.jalr_list), jumps_file) with open(disasm.hack_folder + disasm.hack_file_name, 'wb') as file: file.write(disasm.hack_file) _filename = disasm.comments_file + '(Backup ' if exists(disasm.comments_file): i = 0 while True: i += 1 if not exists(_filename + str(i) + ').txt'): _filename += str(i) + ').txt' with open(_filename, 'w') as backup_comments_file: with open(disasm.comments_file, 'r') as comments_file: backup_comments_file.write(comments_file.read()) break try: with open(disasm.comments_file, 'w') as file: file.write(dict_to_string(disasm.comments)) if _filename != disasm.comments_file + '(Backup ': os.remove(_filename) except Exception as e: simpledialog.messagebox._show('Error', 'There was trouble saving your comments file. ' 'A backup of your old comments can be found next to the original comments file. ' 'Your rom file was saved without error.' '\n\n' + str(e)) checksum_text = ' Checksum calculated - CRC1: {} | CRC2: {}'.format( extend_zeroes(hexi(sum1), 8), extend_zeroes(hexi(sum2), 8)) message = 'Rom Saved.' 
if app_config['calc_crc'][disasm.hack_file_name]: message += checksum_text wait_ctrl_release(lambda: status_text.set(message)) return True def destroy_them(not_main=False): global colours_window, jumps_window, comments_window, dimension_window, manual_cic_win global changes_win, opcodes_win, script_win, phrases_win, mem_regions_win, hex_win if changes_win: changes_win.destroy() changes_win = None if jumps_window: jumps_window.destroy() jumps_window = None if comments_window: comments_window.destroy() comments_window = None if dimension_window: dimension_window.destroy() dimension_window = None if manual_cic_win: manual_cic_win.destroy() manual_cic_win = None if script_win: script_win.destroy() script_win = None if phrases_win: phrases_win.destroy() phrases_win = None if mem_regions_win: mem_regions_win.destroy() mem_regions_win = None if not not_main: if hex_win: hex_win.destroy() hex_win = None if colours_window: colours_window.destroy() colours_window = None if opcodes_win: opcodes_win.destroy() opcodes_win = None window.destroy() def close_window(side = 'right'): if (not app_config['prompt_save_on_exit'] or not disassembler_loaded()): destroy_them() return close_win_width = 270 close_win_height = 45 close_win_y_offset = 130 win_w, win_h, win_x, win_y = geometry(window.geometry()) placement_x = ((win_w if side == 'right' else close_win_width) + win_x) - close_win_width placement_y = (close_win_y_offset + win_y) - close_win_height close_win_geo = '{}x{}+{}+{}'.format(close_win_width, close_win_height, placement_x, placement_y) close_win = tk.Tk() close_win.geometry(close_win_geo) close_win.title('Exit') label = tk.Label(close_win, text = 'Save work?').place(x = 150, y = 12) def yes_button_callback(): if save_changes_to_file(): destroy_them() close_win.destroy() yes_button = tk.Button(close_win, text='Yes',command = yes_button_callback) no_button = tk.Button(close_win, text='No',command = lambda:\ (destroy_them(), close_win.destroy())) yes_button.place(x=10, y=10, width=50) no_button.place(x=75, y=10, width=50) def cancel_close_win(): close_win.destroy() close_win.protocol('WM_DELETE_WINDOW', cancel_close_win) close_win.bind('<FocusOut>', lambda _: close_win.destroy()) close_win.focus_force() close_win.resizable(False, False) close_win.mainloop() def open_files(mode = ''): global disasm, jumps_displaying if disassembler_loaded(): if not save_changes_to_file(): return disasm = None jumps_displaying = {} reset_target() target_of_down_label.place_forget() target_of_up_label.place_forget() [text_box.delete('1.0', tk.END) for text_box in ALL_TEXT_BOXES] window.title('ROM Disassembler') [text_box.configure(state=tk.NORMAL) for text_box in ALL_TEXT_BOXES] destroy_change_rom_name_button() destroy_them(not_main=True) # Set data for rest of this function if mode == 'new': base_title = 'Select the original base rom' hack_title = 'Choose location and name for the new hacked rom' hack_dialog_function = filedialog.asksaveasfilename else: base_title = 'Select the base rom' hack_title = 'Select the hacked rom' hack_dialog_function = filedialog.askopenfilename base_dir = '' base_file_path = '' # Obtain file locations from user input if app_config['open_roms_automatically'] and app_config['previous_base_opened'] and not mode: base_file_path = app_config['previous_base_opened'] if mode == 'new': base_file_path = filedialog.askopenfilename(initialdir = app_config['previous_base_location'], title = base_title) if not base_file_path: return base_file_path = os.path.realpath(base_file_path) base_dir = 
base_file_path[:base_file_path.rfind('\\') + 1] hack_dir = base_dir if mode == 'new' else app_config['previous_hack_location'] if app_config['open_roms_automatically'] and app_config['previous_hack_opened'] and not mode: hack_file_path = app_config['previous_hack_opened'] else: hack_file_path = hack_dialog_function(initialdir = hack_dir, title = hack_title) if not hack_file_path: return hack_file_path = os.path.realpath(hack_file_path) hack_dir = hack_file_path[:hack_file_path.rfind('\\') + 1] if mode == 'existing': hack_name = hack_file_path[hack_file_path.rfind('\\') + 1:] if hack_name not in app_config['hack_of_base']: app_config['hack_of_base'][hack_name] = '' if not exists(app_config['hack_of_base'][hack_name]): base_file_path = filedialog.askopenfilename(initialdir=hack_dir, title= 'There is no associated base rom. Select it now.') if not base_file_path: return base_file_path = os.path.realpath(base_file_path) if base_file_path == hack_file_path: simpledialog.messagebox._show('Wait a sec', 'You shouldn\'t choose the same files.') return else: base_file_path = app_config['hack_of_base'][hack_name] base_dir = base_file_path[:base_file_path.rfind('\\') + 1] base_dot = base_file_path.rfind('.') file_extension = base_file_path[base_dot + 1:] if not '.' in hack_file_path[hack_file_path.rfind('\\'):]: hack_file_path += '.' + file_extension if mode == 'new': if os.path.exists(hack_file_path): simpledialog.messagebox._show('Sorry', 'That file already exists') return # else: # with open(base_file_path, 'rb') as base_file: # with open(hack_file_path, 'wb') as hack_file: # hack_file.write(base_file.read()) timer_reset() # Remember dirs for next browse app_config['previous_base_location'] = base_dir app_config['previous_hack_location'] = hack_dir save_config() # Initialise disassembler with paths to the 2 files, apply saved settings from app_config try: disasm = Disassembler(base_file_path, hack_file_path, window, status_text) if disasm.hack_file_name not in app_config['game_address_mode']: app_config['game_address_mode'][disasm.hack_file_name] = False disasm.game_address_mode = app_config['game_address_mode'][disasm.hack_file_name] disasm.immediate_identifier = app_config['immediate_identifier'] updateToggleAddressLabel() except Exception as e: simpledialog.messagebox._show('Error', e) base_file_text_box.delete('1.0', tk.END) hack_file_text_box.delete('1.0', tk.END) [text_box.config(state=tk.DISABLED) for text_box in ALL_TEXT_BOXES] disasm = None return app_config['hack_of_base'][disasm.hack_file_name] = \ app_config['previous_base_opened'] = disasm.base_folder + disasm.base_file_name app_config['previous_hack_opened'] = disasm.hack_folder + disasm.hack_file_name save_config() if disasm.hack_file_name not in app_config['CIC']: bne_1 = 'BNE A3, T0' bne_2 = 'BNE S0, T0' places = [0x670, 0x66C, 0x63C, 0x77C] cics = ['6101', '6102', '6103', '6105'] new_cic = '6105' for i in range(4): navi_1, navi_2 = places[i], places[i] + 12 place_1 = navi_1 >> 2 place_2 = navi_2 >> 2 inst_1 = disasm.decode(int_of_4_byte_aligned_region(disasm.base_file[navi_1:navi_1+4]), place_1) inst_2 = disasm.decode(int_of_4_byte_aligned_region(disasm.base_file[navi_2:navi_2+4]), place_2) if bne_1 == inst_1[:10] and bne_2 == inst_2[:10]: if cics[i] == '6101': simpledialog.messagebox._show('Ahh..', 'I\'ve been waiting for you, starfox.\n\n For this game, ' 'you may or may not need to bypass the CRC (in the tools menu).') else: new_cic = cics[i] break if i == 3: load_instruction = lambda i: 
disasm.decode(int_of_4_byte_aligned_region(disasm.hack_file[i:i+4]), i >> 2) targ_instructions = { '40': 'MTC0 R0, CAUSE', '180': 'SW V0, $0000 (SP)', '8F4': 'MFLO K0', 'A8C': 'BNE A1, K1, $00000A9C' } is_6106 = True for i in targ_instructions: navi = deci(i) inst = load_instruction(navi) if inst != targ_instructions[i]: is_6106 = False if is_6106: new_cic = '6106' else: simpledialog.messagebox._show('Warning', 'Could not determine CIC chip. ' 'Defaulting to most common chip.\n' 'Rom may get stuck in infinite loop ' 'while booting. If the rom won\'t boot, ' 'try bypassing the CRC with "Tools->' 'Bypass CRC". If that doesn\'t work and ' 'fiddling around with manually setting the ' 'CIC chip also doesn\'t work, then I\'m sorry, ' 'I dunno man.') app_config['CIC'][disasm.hack_file_name] = CIC[new_cic] app_config['calc_crc'][disasm.hack_file_name] = True calc_crc.set(app_config['calc_crc'][disasm.hack_file_name]) disasm.set_cic(app_config['CIC'][disasm.hack_file_name]) hack_file_text_box.insert('1.0', 'Mapping jumps and branches...\nPlease wait...') comments_text_box.insert('1.0', 'This may take a while with larger roms.\nThis only has to be done once per rom.\n\n' 'If your window is not responding - don\'t worry.') window.update() def rest_of_function(): global jumps_displaying window.title('ROM Disassembler - ' + disasm.hack_file_name) if disasm.hack_file_name not in app_config['memory_regions']: app_config['memory_regions'][disasm.hack_file_name] = [] else: disasm.memory_regions = app_config['memory_regions'][disasm.hack_file_name].copy() disasm.map_jumps(address_text_box) [text_box.delete('1.0',tk.END) for text_box in ALL_TEXT_BOXES] window.update() disasm.loaded = True # Navigate user to first line of code, start the undo buffer with the current data on screen navigate_to(0) buffer_append(hack_buffer) buffer_append(comments_buffer) if disasm.hack_file_name not in app_config['jumps_displaying']: app_config['jumps_displaying'][disasm.hack_file_name] = {} if disasm.hack_file_name not
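navigation_callback above turns a user-typed hexadecimal address into a word index by optionally subtracting the game offset and shifting right by two. A standalone sketch of that translation; the offset constant is illustrative, since the real tool derives game_offset and game_address_mode from the loaded ROM:

# Sketch of the address translation used in navigation_callback above.
GAME_OFFSET = 0x80000400  # hypothetical RAM-to-ROM offset, for illustration only

def address_to_word_index(text, game_address_mode=True, game_offset=GAME_OFFSET):
    """Parse a hex address string and return the 4-byte-aligned instruction index."""
    address = int(text, 16)
    if game_address_mode:
        # Translate an in-game (RAM) address back to a file offset.
        address -= game_offset
    # Each instruction is 4 bytes, so the word index is the byte offset divided by 4.
    return address >> 2

print(hex(address_to_word_index("80000500")))  # -> 0x40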
# -*- coding: utf-8 -*- """ tests.http ~~~~~~~~~~ HTTP parsing utilities. :copyright: 2007 Pallets :license: BSD-3-Clause """ from datetime import datetime import pytest from . import strict_eq from werkzeug import datastructures from werkzeug import http from werkzeug._compat import itervalues from werkzeug._compat import wsgi_encoding_dance from werkzeug.test import create_environ class TestHTTPUtility(object): def test_accept(self): a = http.parse_accept_header("en-us,ru;q=0.5") assert list(itervalues(a)) == ["en-us", "ru"] assert a.best == "en-us" assert a.find("ru") == 1 pytest.raises(ValueError, a.index, "de") assert a.to_header() == "en-us,ru;q=0.5" def test_mime_accept(self): a = http.parse_accept_header( "text/xml,application/xml," "application/xhtml+xml," "application/foo;quiet=no; bar=baz;q=0.6," "text/html;q=0.9,text/plain;q=0.8," "image/png,*/*;q=0.5", datastructures.MIMEAccept, ) pytest.raises(ValueError, lambda: a["missing"]) assert a["image/png"] == 1 assert a["text/plain"] == 0.8 assert a["foo/bar"] == 0.5 assert a["application/foo;quiet=no; bar=baz"] == 0.6 assert a[a.find("foo/bar")] == ("*/*", 0.5) def test_accept_matches(self): a = http.parse_accept_header( "text/xml,application/xml,application/xhtml+xml," "text/html;q=0.9,text/plain;q=0.8," "image/png", datastructures.MIMEAccept, ) assert ( a.best_match(["text/html", "application/xhtml+xml"]) == "application/xhtml+xml" ) assert a.best_match(["text/html"]) == "text/html" assert a.best_match(["foo/bar"]) is None assert a.best_match(["foo/bar", "bar/foo"], default="foo/bar") == "foo/bar" assert a.best_match(["application/xml", "text/xml"]) == "application/xml" def test_accept_mime_specificity(self): a = http.parse_accept_header( "text/*, text/html, text/html;level=1, */*", datastructures.MIMEAccept ) assert a.best_match(["text/html; version=1", "text/html"]) == "text/html" assert a.best_match(["text/html", "text/html; level=1"]) == "text/html; level=1" def test_charset_accept(self): a = http.parse_accept_header( "ISO-8859-1,utf-8;q=0.7,*;q=0.7", datastructures.CharsetAccept ) assert a["iso-8859-1"] == a["iso8859-1"] assert a["iso-8859-1"] == 1 assert a["UTF8"] == 0.7 assert a["ebcdic"] == 0.7 def test_language_accept(self): a = http.parse_accept_header( "de-AT,de;q=0.8,en;q=0.5", datastructures.LanguageAccept ) assert a.best == "de-AT" assert "de_AT" in a assert "en" in a assert a["de-at"] == 1 assert a["en"] == 0.5 def test_set_header(self): hs = http.parse_set_header('foo, Bar, "Blah baz", Hehe') assert "blah baz" in hs assert "foobar" not in hs assert "foo" in hs assert list(hs) == ["foo", "Bar", "Blah baz", "Hehe"] hs.add("Foo") assert hs.to_header() == 'foo, Bar, "Blah baz", Hehe' def test_list_header(self): hl = http.parse_list_header("foo baz, blah") assert hl == ["foo baz", "blah"] def test_dict_header(self): d = http.parse_dict_header('foo="bar baz", blah=42') assert d == {"foo": "bar baz", "blah": "42"} def test_cache_control_header(self): cc = http.parse_cache_control_header("max-age=0, no-cache") assert cc.max_age == 0 assert cc.no_cache cc = http.parse_cache_control_header( 'private, community="UCI"', None, datastructures.ResponseCacheControl ) assert cc.private assert cc["community"] == "UCI" c = datastructures.ResponseCacheControl() assert c.no_cache is None assert c.private is None c.no_cache = True assert c.no_cache == "*" c.private = True assert c.private == "*" del c.private assert c.private is None assert c.to_header() == "no-cache" def test_csp_header(self): csp = http.parse_csp_header( "default-src 
'self'; script-src 'unsafe-inline' *; img-src" ) assert csp.default_src == "'self'" assert csp.script_src == "'unsafe-inline' *" assert csp.img_src is None def test_authorization_header(self): a = http.parse_authorization_header("Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==") assert a.type == "basic" assert a.username == u"Aladdin" assert a.password == u"<PASSWORD>" a = http.parse_authorization_header( "Basic 0YDRg9GB0YHQutC40IE60JHRg9C60LLRiw==" ) assert a.type == "basic" assert a.username == u"русскиЁ" assert a.password == u"<PASSWORD>" a = http.parse_authorization_header("Basic 5pmu6YCa6K+dOuS4reaWhw==") assert a.type == "basic" assert a.username == u"普通话" assert a.password == u"中文" a = http.parse_authorization_header( '''Digest username="Mufasa", realm="<EMAIL>", nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093", uri="/dir/index.html", qop=auth, nc=00000001, cnonce="0a4f113b", response="6629fae49393a05397450978507c4ef1", opaque="5ccc069c403ebaf9f0171e9517f40e41"''' ) assert a.type == "digest" assert a.username == "Mufasa" assert a.realm == "<EMAIL>" assert a.nonce == "dcd98b7102dd2f0e8b11d0f600bfb0c093" assert a.uri == "/dir/index.html" assert a.qop == "auth" assert a.nc == "00000001" assert a.cnonce == "0a4f113b" assert a.response == "6629fae49393a05397450978507c4ef1" assert a.opaque == "5ccc069c403ebaf9f0171e9517f40e41" a = http.parse_authorization_header( '''Digest username="Mufasa", realm="<EMAIL>", nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093", uri="/dir/index.html", response="e257afa1414a3340d93d30955171dd0e", opaque="5ccc069c403ebaf9f0171e9517f40e41"''' ) assert a.type == "digest" assert a.username == "Mufasa" assert a.realm == "<EMAIL>" assert a.nonce == "dcd98b7102dd2f0e8b11d0f600bfb0c093" assert a.uri == "/dir/index.html" assert a.response == "e257afa1414a3340d93d30955171dd0e" assert a.opaque == "5ccc069c403ebaf9f0171e9517f40e41" assert http.parse_authorization_header("") is None assert http.parse_authorization_header(None) is None assert http.parse_authorization_header("foo") is None def test_www_authenticate_header(self): wa = http.parse_www_authenticate_header('Basic realm="WallyWorld"') assert wa.type == "basic" assert wa.realm == "WallyWorld" wa.realm = "Foo Bar" assert wa.to_header() == 'Basic realm="Foo Bar"' wa = http.parse_www_authenticate_header( '''Digest realm="<EMAIL>", qop="auth,auth-int", nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093", opaque="5ccc069c403ebaf9f0171e9517f40e41"''' ) assert wa.type == "digest" assert wa.realm == "<EMAIL>" assert "auth" in wa.qop assert "auth-int" in wa.qop assert wa.nonce == "dcd98b7102dd2f0e8b11d0f600bfb0c093" assert wa.opaque == "5ccc069c403ebaf9f0171e9517f40e41" wa = http.parse_www_authenticate_header("broken") assert wa.type == "broken" assert not http.parse_www_authenticate_header("").type assert not http.parse_www_authenticate_header("") def test_etags(self): assert http.quote_etag("foo") == '"foo"' assert http.quote_etag("foo", True) == 'W/"foo"' assert http.unquote_etag('"foo"') == ("foo", False) assert http.unquote_etag('W/"foo"') == ("foo", True) es = http.parse_etags('"foo", "bar", W/"baz", blar') assert sorted(es) == ["bar", "blar", "foo"] assert "foo" in es assert "baz" not in es assert es.contains_weak("baz") assert "blar" in es assert es.contains_raw('W/"baz"') assert es.contains_raw('"foo"') assert sorted(es.to_header().split(", ")) == [ '"bar"', '"blar"', '"foo"', 'W/"baz"', ] def test_etags_nonzero(self): etags = http.parse_etags('W/"foo"') assert bool(etags) assert etags.contains_raw('W/"foo"') def test_parse_date(self): assert 
http.parse_date("Sun, 06 Nov 1994 08:49:37 GMT ") == datetime( 1994, 11, 6, 8, 49, 37 ) assert http.parse_date("Sunday, 06-Nov-94 08:49:37 GMT") == datetime( 1994, 11, 6, 8, 49, 37 ) assert http.parse_date(" Sun Nov 6 08:49:37 1994") == datetime( 1994, 11, 6, 8, 49, 37 ) assert http.parse_date("foo") is None def test_parse_date_overflows(self): assert http.parse_date(" Sun 02 Feb 1343 08:49:37 GMT") == datetime( 1343, 2, 2, 8, 49, 37 ) assert http.parse_date("Thu, 01 Jan 1970 00:00:00 GMT") == datetime( 1970, 1, 1, 0, 0 ) assert http.parse_date("Thu, 33 Jan 1970 00:00:00 GMT") is None def test_remove_entity_headers(self): now = http.http_date() headers1 = [ ("Date", now), ("Content-Type", "text/html"), ("Content-Length", "0"), ] headers2 = datastructures.Headers(headers1) http.remove_entity_headers(headers1) assert headers1 == [("Date", now)] http.remove_entity_headers(headers2) assert headers2 == datastructures.Headers([(u"Date", now)]) def test_remove_hop_by_hop_headers(self): headers1 = [("Connection", "closed"), ("Foo", "bar"), ("Keep-Alive", "wtf")] headers2 = datastructures.Headers(headers1) http.remove_hop_by_hop_headers(headers1) assert headers1 == [("Foo", "bar")] http.remove_hop_by_hop_headers(headers2) assert headers2 == datastructures.Headers([("Foo", "bar")]) def test_parse_options_header(self): assert http.parse_options_header(None) == ("", {}) assert http.parse_options_header("") == ("", {}) assert http.parse_options_header(r'something; foo="other\"thing"') == ( "something", {"foo": 'other"thing'}, ) assert http.parse_options_header(r'something; foo="other\"thing"; meh=42') == ( "something", {"foo": 'other"thing', "meh": "42"}, ) assert http.parse_options_header( r'something; foo="other\"thing"; meh=42; bleh' ) == ("something", {"foo": 'other"thing', "meh": "42", "bleh": None}) assert http.parse_options_header( 'something; foo="other;thing"; meh=42; bleh' ) == ("something", {"foo": "other;thing", "meh": "42", "bleh": None}) assert http.parse_options_header('something; foo="otherthing"; meh=; bleh') == ( "something", {"foo": "otherthing", "meh": None, "bleh": None}, ) # Issue #404 assert http.parse_options_header( 'multipart/form-data; name="foo bar"; ' 'filename="bar foo"' ) == ("multipart/form-data", {"name": "foo bar", "filename": "bar foo"}) # Examples from RFC assert http.parse_options_header("audio/*; q=0.2, audio/basic") == ( "audio/*", {"q": "0.2"}, ) assert http.parse_options_header( "audio/*; q=0.2, audio/basic", multiple=True ) == ("audio/*", {"q": "0.2"}, "audio/basic", {}) assert http.parse_options_header( "text/plain; q=0.5, text/html\n text/x-dvi; q=0.8, text/x-c", multiple=True, ) == ( "text/plain", {"q": "0.5"}, "text/html", {}, "text/x-dvi", {"q": "0.8"}, "text/x-c", {}, ) assert http.parse_options_header( "text/plain; q=0.5, text/html\n text/x-dvi; q=0.8, text/x-c" ) == ("text/plain", {"q": "0.5"}) # Issue #932 assert http.parse_options_header( "form-data; name=\"a_file\"; filename*=UTF-8''" '"%c2%a3%20and%20%e2%82%ac%20rates"' ) == ("form-data", {"name": "a_file", "filename": u"\xa3 and \u20ac rates"}) assert http.parse_options_header( "form-data; name*=UTF-8''\"%C5%AAn%C4%ADc%C5%8Dde%CC%BD\"; " 'filename="some_file.txt"' ) == ( "form-data", {"name": u"\u016an\u012dc\u014dde\u033d", "filename": "some_file.txt"}, ) def test_parse_options_header_value_with_quotes(self): assert http.parse_options_header( 'form-data; name="file"; filename="t\'es\'t.txt"' ) == ("form-data", {"name": "file", "filename": "t'es't.txt"}) assert http.parse_options_header( 
"form-data; name=\"file\"; filename*=UTF-8''\"'🐍'.txt\"" ) == ("form-data", {"name": "file", "filename": u"'🐍'.txt"}) def test_parse_options_header_broken_values(self): # Issue #995 assert http.parse_options_header(" ") == ("", {}) assert http.parse_options_header(" , ") == ("", {}) assert http.parse_options_header(" ; ") == ("", {}) assert http.parse_options_header(" ,; ") == ("", {}) assert http.parse_options_header(" , a ") == ("", {}) assert http.parse_options_header(" ; a ") == ("", {}) def test_dump_options_header(self): assert http.dump_options_header("foo", {"bar": 42}) == "foo; bar=42" assert http.dump_options_header("foo", {"bar": 42, "fizz": None}) in ( "foo; bar=42; fizz", "foo; fizz; bar=42", ) def test_dump_header(self): assert http.dump_header([1, 2, 3]) == "1, 2, 3" assert http.dump_header([1, 2, 3], allow_token=False) == '"1", "2", "3"' assert http.dump_header({"foo": "bar"}, allow_token=False) == 'foo="bar"' assert http.dump_header({"foo": "bar"}) == "foo=bar" def test_is_resource_modified(self): env = create_environ() # any method is allowed env["REQUEST_METHOD"] = "POST" assert http.is_resource_modified(env, etag="testing") env["REQUEST_METHOD"] = "GET" # etagify from data pytest.raises(TypeError, http.is_resource_modified, env, data="42", etag="23") env["HTTP_IF_NONE_MATCH"] = http.generate_etag(b"awesome") assert not http.is_resource_modified(env, data=b"awesome") env["HTTP_IF_MODIFIED_SINCE"] = http.http_date(datetime(2008, 1, 1, 12, 30)) assert not http.is_resource_modified( env, last_modified=datetime(2008, 1, 1, 12, 00) ) assert http.is_resource_modified( env, last_modified=datetime(2008, 1, 1, 13, 00) ) def test_is_resource_modified_for_range_requests(self): env = create_environ() env["HTTP_IF_MODIFIED_SINCE"] = http.http_date(datetime(2008, 1, 1, 12, 30)) env["HTTP_IF_RANGE"] = http.generate_etag(b"awesome_if_range") # Range header not present, so If-Range should be ignored assert not http.is_resource_modified( env, data=b"not_the_same", ignore_if_range=False, last_modified=datetime(2008, 1, 1, 12, 30), ) env["HTTP_RANGE"] = "" assert not http.is_resource_modified( env, data=b"awesome_if_range", ignore_if_range=False ) assert http.is_resource_modified( env, data=b"not_the_same", ignore_if_range=False ) env["HTTP_IF_RANGE"] = http.http_date(datetime(2008, 1, 1, 13, 30)) assert http.is_resource_modified( env, last_modified=datetime(2008, 1, 1, 14, 00), ignore_if_range=False ) assert not http.is_resource_modified( env, last_modified=datetime(2008, 1, 1, 13, 30), ignore_if_range=False
# -*- coding: utf-8 -*- """ ========== """ # import standard libraries import os # import third-party libraries import numpy as np import matplotlib.pyplot as plt from colour import write_image, read_image # import my libraries import test_pattern_generator2 as tpg import transfer_functions as tf import plot_utility as pu # information __author__ = '<NAME>' __copyright__ = 'Copyright (C) 2020 - <NAME>' __license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause' __maintainer__ = '<NAME>' __email__ = 'toru.ver.11 at-sign gmail.com' __all__ = [] def create_ramp(): x = np.linspace(0, 1, 1920).reshape((1, 1920, 1)) img = np.ones((1080, 1920, 3)) img = x * img write_image(img, "test_src.tif", bit_depth='uint16') def create_exr_ramp(min_exposure=-12, max_exposure=12): x = np.linspace(0, 1, 1920).reshape((1, 1920, 1)) y = tpg.shaper_func_log2_to_linear( x, min_exposure=min_exposure, max_exposure=max_exposure) img = np.ones((1080, 1920, 3)) * y fname = f"./img/test_src_exp_{min_exposure}_{max_exposure}.exr" write_image(img, fname, bit_depth='float32') def plot_input_drt(): # file_list = [ # ['./img/old/test_out_sdr100.tif', 'SDR 100'], # ['./img/old/test_out_hdr500.tif', 'HDR 500'], # ['./img/old/test_out_hdr1000.tif', 'HDR 1000'], # ['./img/old/test_out_hdr2000.tif', 'HDR 2000'], # ['./img/old/test_out_hdr4000.tif', 'HDR 4000'], # ['./img/old/test_out_off.tif', 'DRT OFF'] # ] # check_input_drt_test( # file_list=file_list, graph_name="Input_DRT_Characteristics_w_SDR") # file_list = [ # ['./img/old/test_out_hdr500.tif', 'HDR 500'], # ['./img/old/test_out_hdr1000.tif', 'HDR 1000'], # ['./img/old/test_out_hdr2000.tif', 'HDR 2000'], # ['./img/old/test_out_hdr4000.tif', 'HDR 4000'], # ['./img/old/test_out_off.tif', 'DRT OFF'] # ] # check_input_drt_test( # file_list=file_list, graph_name="Input_DRT_Characteristics_wo_SDR") # file_list = [ # ['./img/old/test_out_sdr_er_100-200.tif', 'SDR ER 100/200'], # ['./img/old/test_out_hdr_er_1000-2000.tif', 'HDR ER 1000/2000'], # ['./img/old/test_out_hdr_er_1000-4000.tif', 'HDR ER 1000/4000'], # ['./img/old/test_out_hdr_er_1000-10000.tif', 'HDR ER 1000/10000'], # ['./img/old/test_out_hdr_er_4000-10000.tif', 'HDR ER 4000/10000'], # ['./img/old/test_out_off.tif', 'DRT OFF'] # ] # check_input_drt_test( # file_list=file_list, graph_name="Input_DRT_Characteristics_ER_w_SDR") file_list = [ ['./img/old/test_out_hdr_er_1000-2000.tif', 'HDR ER 1000/2000', '-.'], ['./img/old/test_out_hdr_er_1000-4000.tif', 'HDR ER 1000/4000', '--'], ['./img/old/test_out_hdr_er_1000-10000.tif', 'HDR ER 1000/10000', '-'], ['./img/old/test_out_hdr_er_4000-10000.tif', 'HDR ER 4000/10000', '-'], # ['./img/old/test_out_off.tif', 'DRT OFF'] ] check_input_drt_test( file_list=file_list, graph_name="Input_DRT_Characteristics_ER_wo_SDR") # check_input_drt_test_sdr_only() def check_input_drt_test(file_list, graph_name): create_ramp() x = np.linspace(0, 1, 1920) x_luminance = tf.eotf_to_luminance(x, tf.ST2084) fig, ax1 = pu.plot_1_graph( fontsize=20, figsize=(10, 8), graph_title="DaVinci17 Input DRT Characteristics", graph_title_size=None, xlabel="Input Luminance [cd/m2]", ylabel="Output Luminance [cd/m2]", axis_label_size=None, legend_size=17, xlim=[0.009, 15000], ylim=[0.009, 15000], xtick=None, ytick=None, xtick_size=None, ytick_size=None, linewidth=3, minor_xtick_num=None, minor_ytick_num=None, return_figure=True) pu.log_scale_settings(ax1, grid_alpha=0.5, bg_color="#E0E0E0") for idx in range(len(file_list))[::-1]: img = read_image(file_list[idx][0])[0, :, 0] label = 
file_list[idx][1] ls = file_list[idx][2] y_luminance = tf.eotf_to_luminance(img, tf.ST2084) ax1.plot(x_luminance, y_luminance, ls, label=label) plt.legend(loc='upper left') fname_full = f"./img/{graph_name}.png" plt.savefig(fname_full, bbox_inches='tight', pad_inches=0.1) # plt.show() plt.close(fig) def check_input_drt_test_sdr_only(): create_ramp() x = np.linspace(0, 1, 1920) fig, ax1 = pu.plot_1_graph( fontsize=20, figsize=(10, 8), graph_title="DaVinci17 Input DRT Characteristics", graph_title_size=None, xlabel="Input Luminance [cd/m2]", ylabel="Output Luminance [cd/m2]", axis_label_size=None, legend_size=17, xlim=[0.009, 15000], ylim=[0.009, 15000], xtick=None, ytick=None, xtick_size=None, ytick_size=None, linewidth=3, minor_xtick_num=None, minor_ytick_num=None, return_figure=True) pu.log_scale_settings(ax1, grid_alpha=0.5, bg_color="#E0E0E0") # img = read_image("./img/test_out_sdr100_on_gm24.tif")[0, :, 0] # label = "DRT OFF(ST2084 to Gamma2.4 (.tif))" # x_luminance = tf.eotf_to_luminance(x, tf.ST2084) # y_luminance = tf.eotf_to_luminance(img, tf.GAMMA24) # ax1.plot(x_luminance, y_luminance, label=label) # img = read_image("./img/test_out_sdr100_on_gm24_203nits.tif")[0, :, 0] # label = "DRT OFF(ST2084 to Gamma2.4 (.tif) 203nits)" # x_luminance = tf.eotf_to_luminance(x, tf.ST2084) # y_luminance = tf.eotf_to_luminance(img, tf.GAMMA24) # ax1.plot(x_luminance, y_luminance, label=label) img = read_image("./img/old/test_out_sdr100_on_gm24.tif")[0, :, 0] label = 'SDR 100 (Output color space is Gamma2.4)' x_luminance = tf.eotf_to_luminance(x, tf.ST2084) y_luminance = tf.eotf_to_luminance(img, tf.GAMMA24) ax1.plot(x_luminance, y_luminance, label=label) # img = read_image("./img/test_out_exp_-12_12_sdr_drt-off_gm24.tif")[0, :, 0] # label = "DRT OFF(Gamma2.4 to Gamma2.4 (.tif))" # x_luminance = tf.eotf_to_luminance(x, tf.GAMMA24) # y_luminance = tf.eotf_to_luminance(img, tf.GAMMA24) # ax1.plot(x_luminance, y_luminance, label=label) # img = read_image("./img/test_out_exp_-12_12_sdr_drt-off.tif")[0, :, 0] # label = "DRT OFF(Linear to Gamma2.4 (.exr))" # y_luminance = tf.eotf_to_luminance(img, tf.GAMMA24) # x = np.linspace(0, 1, 1920) # x_luminance = tpg.shaper_func_log2_to_linear( # x, min_exposure=-12, max_exposure=12) # ax1.plot( # x_luminance * 100, y_luminance, '--', color=pu.SKY, label=label) plt.legend(loc='upper left') fname_full = "./img/input_drt_sdr_only.png" plt.savefig(fname_full, bbox_inches='tight', pad_inches=0.1) # plt.show() plt.close(fig) def check_100nits_code_value_on_st2084(): code_value = tf.oetf_from_luminance(100, tf.ST2084) print(code_value) print(code_value * 1023) def plot_forum_fig1(): x = np.linspace(0, 1, 1920) fig, ax1 = pu.plot_1_graph( fontsize=20, figsize=(10, 8), graph_title="HDR to SDR conversion", graph_title_size=None, xlabel="Input Luminance [cd/m2]", ylabel="Output Luminance [cd/m2]", axis_label_size=None, legend_size=17, xlim=[0.009, 15000], ylim=[0.009, 15000], xtick=None, ytick=None, xtick_size=None, ytick_size=None, linewidth=3, minor_xtick_num=None, minor_ytick_num=None, return_figure=True) pu.log_scale_settings(ax1, grid_alpha=0.5, bg_color="#E0E0E0") img = read_image("./img/dv17_fig1_sdr_out_st2084.tif")[0, :, 0] label = "(a) src: ST2084(.tif)" x_luminance = tf.eotf_to_luminance(x, tf.ST2084) y_luminance = tf.eotf_to_luminance(img, tf.GAMMA24) ax1.plot(x_luminance, y_luminance, color=pu.BLUE, label=label) # img = read_image("./img/dv17_fig1_203_sdr_out_st2084.tif")[0, :, 0] # label = "(b) src: ST2084(.tif), ref-white: 203nits" # x_luminance = 
tf.eotf_to_luminance(x, tf.ST2084) # y_luminance = tf.eotf_to_luminance(img, tf.GAMMA24) # ax1.plot(x_luminance, y_luminance, label=label) img = read_image("./img/dv17_fig1_sdr_out_linear.tif")[0, :, 0] label = "(b) src: Linear(.exr), This is the expected result." y_luminance = tf.eotf_to_luminance(img, tf.GAMMA24) x = np.linspace(0, 1, 1920) x_luminance = tpg.shaper_func_log2_to_linear( x, min_exposure=-12, max_exposure=12) ax1.plot( x_luminance * 100, y_luminance, '--', color=pu.RED, label=label) # img = read_image("./img/dv17_fig1_203_sdr_out_linear.tif")[0, :, 0] # label = "src=Linear(.exr), ref-white=203nits" # y_luminance = tf.eotf_to_luminance(img, tf.GAMMA24) # x = np.linspace(0, 1, 1920) # x_luminance = tpg.shaper_func_log2_to_linear( # x, min_exposure=-12, max_exposure=12) # ax1.plot( # x_luminance * 100, y_luminance, label=label) plt.legend(loc='upper left') fname_full = "./img/fig1.png" plt.savefig(fname_full, bbox_inches='tight', pad_inches=0.1) # plt.show() plt.close(fig) def plot_output_drt(): # file_list = [ # # ['./img/Output_DRT_SDR_ER_100-200.tif', 'SDR ER 100/200', '-'], # ['./img/old/Output_DRT_HDR_ER_1000-2000.tif', 'HDR ER 1000/2000', '-'], # ['./img/old/Output_DRT_HDR_ER_1000-4000.tif', 'HDR ER 1000/4000', '-'], # ['./img/old/Output_DRT_HDR_ER_1000-10000.tif', 'HDR ER 1000/10000', '-'], # ['./img/old/Output_DRT_HDR_ER_4000-10000.tif', 'HDR ER 4000/10000', '--'], # ] # check_output_drt_test( # file_list=file_list, # graph_name="DaVinci17 Output DRT ER 無印ST2084") # file_list = [ # # ['./img/Output_DRT_SDR_ER_100-200.tif', 'SDR ER 100/200', '-'], # ['./img/Output_DRT_HDR_ER_1000-2000.tif', 'HDR ER 1000/2000', '-'], # ['./img/Output_DRT_HDR_ER_1000-4000.tif', 'HDR ER 1000/4000', '-'], # ['./img/Output_DRT_HDR_ER_1000-10000.tif', 'HDR ER 1000/10000', '-'], # ['./img/Output_DRT_HDR_ER_4000-10000.tif', 'HDR ER 4000/10000', '--'], # ] # check_output_drt_test( # file_list=file_list, # graph_name="DaVinci17 Output DRT Characteristics ER") # file_list = [ # # ['./img/Output_DRT_SDR_100.tif', 'SDR 100', '-'], # ['./img/old/Output_DRT_HDR_500.tif', 'HDR 500', '-'], # ['./img/old/Output_DRT_HDR_1000.tif', 'HDR 1000', '-'], # ['./img/old/Output_DRT_HDR_2000.tif', 'HDR 2000', '-'], # ['./img/old/Output_DRT_HDR_4000.tif', 'HDR 4000', '-'] # ] # check_output_drt_test( # file_list=file_list, # graph_name="DaVinci17 Output DRT 無印 ST2084") file_list = [ # ['./img/Output_DRT_SDR_100.tif', 'SDR 100', '-'], ['./img/Output_DRT_HDR_500.tif', 'HDR 500', '-'], ['./img/Output_DRT_HDR_1000.tif', 'HDR 1000', '-'], ['./img/Output_DRT_HDR_2000.tif', 'HDR 2000', '-'], ['./img/Output_DRT_HDR_4000.tif', 'HDR 4000', '-'], ['./img/Output_DRT_HDR_10000.tif', 'Custom (10000 nit)', '--'] ] check_output_drt_test( file_list=file_list, graph_name="DaVinci17 Output DRT Characteristics") file_list = [ ['./img/DRT_In_None_HDR1000-500.tif', 'HDR 1000, ST2084 500 nit', '-'], ['./img/DRT_In_None_HDR1000-1000.tif', 'HDR 1000, ST2084 1000 nit', '-'], ['./img/DRT_In_None_HDR1000-2000.tif', 'HDR 1000, ST2084 2000 nit', '-'], ['./img/DRT_In_None_HDR1000-4000.tif', 'HDR 1000, ST2084 4000 nit', '-'], ['./img/DRT_In_None_HDR1000-10000.tif', 'HDR 1000, ST2084 10000 nit', '-'], ] check_output_drt_test( file_list=file_list, graph_name="DaVinci17 Out DRT Characteristics_fix_HDR1000") def check_output_drt_test(file_list, graph_name): x = np.linspace(0, 1, 1920) x_luminance = tf.eotf_to_luminance(x, tf.ST2084) fig, ax1 = pu.plot_1_graph( fontsize=20, figsize=(10, 8), graph_title="DaVinci17 Output DRT Characteristics", 
graph_title_size=None, xlabel="Input Luminance [cd/m2]", ylabel="Output Luminance [cd/m2]", axis_label_size=None, legend_size=17, xlim=[0.009, 15000], ylim=[0.009, 15000], xtick=None, ytick=None, xtick_size=None, ytick_size=None, linewidth=3, minor_xtick_num=None, minor_ytick_num=None, return_figure=True) pu.log_scale_settings(ax1, grid_alpha=0.5, bg_color="#E0E0E0") for idx in range(len(file_list)): img = read_image(file_list[idx][0])[0, :, 0] label = file_list[idx][1] ls = file_list[idx][2] y_luminance = tf.eotf_to_luminance(img, tf.ST2084) ax1.plot(x_luminance, y_luminance, ls, label=label) plt.legend(loc='upper left') fname_full = f"./img/{graph_name}.png".replace(' ', "_") plt.savefig(fname_full, bbox_inches='tight', pad_inches=0.1) # plt.show() plt.close(fig) def check_output_drt_test_exr(file_list, graph_name): x = np.linspace(0, 1, 1920) x_luminance = tf.eotf_to_luminance(x, tf.ST2084) fig, ax1 = pu.plot_1_graph( fontsize=20, figsize=(10, 8), graph_title=graph_name, graph_title_size=None, xlabel="Input Luminance [cd/m2]", ylabel="Output Luminance [cd/m2]", axis_label_size=None, legend_size=17, xlim=[0.009, 15000], ylim=None, xtick=None, ytick=None, xtick_size=None, ytick_size=None, linewidth=3, minor_xtick_num=None, minor_ytick_num=None, return_figure=True) pu.log_scale_settings(ax1, grid_alpha=0.5, bg_color="#E0E0E0") for idx in range(len(file_list)): img = read_image(file_list[idx][0])[0, :, 0] label = file_list[idx][1] ls = file_list[idx][2] y_luminance = img * 10000 ax1.plot(x_luminance, y_luminance, ls, label=label) plt.legend(loc='upper left') fname_full = f"./img/{graph_name}.png".replace(' ', "_") plt.savefig(fname_full, bbox_inches='tight', pad_inches=0.1) # plt.show() plt.close(fig) def plot_total_drt(): file_list = [ ['./img/DRT_Total_HDR_500.tif', 'HDR 500', '-'], ['./img/DRT_Total_HDR_1000.tif', 'HDR 1000', '-'], ['./img/DRT_Total_HDR_2000.tif', 'HDR 2000', '-'], ['./img/DRT_Total_HDR_4000.tif', 'HDR 4000', '-'], ['./img/DRT_Total_HDR_10000.tif', 'Custom (10000 nit)', '-'], ] check_total_drt_test( file_list=file_list, graph_name="Input-Output_DRT_Characteristics") file_list = [ ['./img/Output_DRT_HDR1000-500.tif', 'HDR 1000, ST2084 500 nit', '-'], ['./img/Output_DRT_HDR1000-1000.tif', 'HDR 1000, ST2084 1000 nit', '-'], ['./img/Output_DRT_HDR1000-2000.tif', 'HDR 1000, ST2084 2000 nit', '-'], ['./img/Output_DRT_HDR1000-4000.tif', 'HDR 1000, ST2084 4000 nit', '-'], ['./img/Output_DRT_HDR1000-10000.tif','HDR 1000, ST2084 10000 nit', '-'], ] check_total_drt_test( file_list=file_list, graph_name="DaVinci17 In-Out DRT Characteristics_fix_HDR1000") file_list = [ ['./img/DRT_Total_HDR_ER_1000-2000.tif', 'HDR ER 1000/2000', '-'], ['./img/DRT_Total_HDR_ER_1000-4000.tif', 'HDR ER 1000/4000', '-'], ['./img/DRT_Total_HDR_ER_1000-10000.tif', 'HDR ER 1000/10000', '-'], ['./img/DRT_Total_HDR_ER_4000-10000.tif', 'HDR ER 4000/10000', '-'], ] check_total_drt_test( file_list=file_list, graph_name="Input-Output_DRT_Characteristics_ER") def check_total_drt_test(file_list, graph_name): x = np.linspace(0, 1, 1920) x_luminance = tf.eotf_to_luminance(x, tf.ST2084) fig, ax1 = pu.plot_1_graph( fontsize=20, figsize=(10, 8), graph_title="DaVinci17 Input-Output DRT Characteristics", graph_title_size=None, xlabel="Input Luminance [cd/m2]", ylabel="Output Luminance [cd/m2]", axis_label_size=None, legend_size=17, xlim=[0.009, 15000], ylim=[0.009, 15000], xtick=None, ytick=None, xtick_size=None, ytick_size=None, linewidth=3, minor_xtick_num=None, minor_ytick_num=None, return_figure=True) 
    pu.log_scale_settings(ax1, grid_alpha=0.5, bg_color="#E0E0E0")
    for idx in range(len(file_list)):
        img = read_image(file_list[idx][0])[0, :, 0]
        label = file_list[idx][1]
        ls = file_list[idx][2]
        y_luminance = tf.eotf_to_luminance(img, tf.ST2084)
        ax1.plot(x_luminance, y_luminance, ls, label=label)
    plt.legend(loc='upper left')
    fname_full = f"./img/{graph_name}.png".replace(' ', "_")
    plt.savefig(fname_full, bbox_inches='tight', pad_inches=0.1)
    # plt.show()
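The plots above convert code values to absolute luminance via eotf_to_luminance(x, tf.ST2084). For reference, a standalone sketch of the SMPTE ST 2084 (PQ) EOTF; the library call is assumed to implement this same standard formula:

# Standalone ST 2084 (PQ) EOTF: non-linear code value (0..1) -> luminance in cd/m2.
import numpy as np

def st2084_eotf_to_luminance(code_value):
    """Map ST 2084 code values to absolute luminance using the standard constants."""
    m1 = 2610 / 16384
    m2 = 2523 / 4096 * 128
    c1 = 3424 / 4096
    c2 = 2413 / 4096 * 32
    c3 = 2392 / 4096 * 32
    v = np.asarray(code_value, dtype=np.float64) ** (1 / m2)
    return 10000 * (np.maximum(v - c1, 0) / (c2 - c3 * v)) ** (1 / m1)

print(st2084_eotf_to_luminance(1.0))    # -> 10000 cd/m2 (peak)
print(st2084_eotf_to_luminance(0.508))  # -> ~100 cd/m2 (approx.)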
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # formats: ipynb,py # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.6.0 # kernelspec: # display_name: deep_ml_curriculum # language: python # name: deep_ml_curriculum # --- # # Time Series Forcasting # # In time series forcasting (TSF) the goal is to predict the future values using the behaviour of data in the past. We can use some of the tehniques we learned about in the last notebook. For instance, Holt-Winters methods can be used for forcasting as well as analysis. import pandas as pd import matplotlib.pyplot as plt import numpy as np import warnings plt.rcParams["figure.figsize"] = [12,5] warnings.simplefilter("ignore") # We will load a subset of London Smart meters dataset. This dataset shows electricity consumption of 5,567 houses in London. We will only use the data for a single block. # # The data shows daily consumption of each house and various statistics regarding their daily consumption. The original data is from [UK Power Networks](https://data.london.gov.uk/dataset/smartmeter-energy-use-data-in-london-households) # Load data df = block0 = pd.read_csv("../../data/processed/smartmeter/block_0.csv", parse_dates=['day'], index_col=['day'])[['energy_sum']] # Get the mean over all houses, by day df = df.groupby('day').mean().iloc[:-1] # Rename energy to target df = df.rename(columns={'energy_sum':'target'}) df.plot() df # In forcasting we try to predict the next step, therefore it is essential that we specify the frequency of data so the model knows what we mean by next step. # # Pandas data frames have frequency property, which we need to set df.index # You can see at the bottom `freq` is set to `None`. We need to specify the the data is monthly and the dates are start of the month. So we use `freq = "MS"`. df.index.freq = "1D" # __Note:__ Most of the algorithms have ways of infering the frequency if it is not set. But it is always safer to set it ourselves rather than leave it for the algorithms to figure out. # To measure whether we are doing well in our prediction or not, commonly we split the data into two parts, one for training the model and the other for evaluating the forcasting quality. In time series we train on the past and predict on the future, so the validation set needs to be in the future. # # The part that is used for taining is called training set and for time series it usually is the data from the beginning up to a certain point in time. The part that is used for evaluation is may be called validation set, test set, or evaluation set. The validation set comes right after the training set, because we use the training set to understand the behaviour of data and then we want to know what is going to happen right after that. # # # Let's split our data into training and validation set. Let's split in a way so that last 30% is in validation set and the rest in training set. # + # We are forecasting, so split into past and future n_split = -int(len(df)*0.7) df_train = df[:-n_split] df_valid = df[-n_split:] ax = df_train['target'].plot(legend=True, label="Train") df_valid['target'].plot(ax=ax, legend=True, label="Validation") # - # ## Stationarity # # A time series is considered stationary when its properties (mean and standard deviation) does not change with time. Therefore, any time series with trend or seasonality is not stationary. 
An example of stationary data is white noise: plt.figure(figsize=(12, 8)) plt.plot(range(100), np.arange(100)/50, ls=':', c='b', label='line - not stationary') plt.plot(range(100),np.sin(np.arange(100)/5)-2, c='b', label='sin - not stationary') plt.plot(range(100), np.zeros(100), c='r', label='zeros - stationary') plt.plot(range(100), np.random.randn(100)+4, ls='--', c='r', label='random noise - stationary') plt.legend() plt.xlabel('time [days]') plt.title('examples of non/stationary series') # Why is random noise stationary? # The std and mean are constant np.random.seed(42) random_noise = pd.Series(np.random.randn(200)) plt.plot(random_noise, label='random noise') random_noise.rolling(30).mean().plot(label='mean') random_noise.rolling(30).std().plot(label='std') plt.legend() # Sin - this is not stationary # The std and mean are not constant np.random.seed(42) series_sin = pd.Series(np.sin(np.arange(200)/5)) plt.plot(series_sin, label='sin(x/5)') series_sin.rolling(50).mean().plot(label='mean') series_sin.rolling(50).std().plot(label='std') plt.legend() # While it is easy to tell if a time series is not stationary when there is a clear trend, in some cases it might be pretty difficult to decide whether a time series is stationary or not. Therefore, we use statistical tests to make a decision. # # __Why is it important if a time series is stationary or not?__<br> # We know that in a stationary time series the characteristics will remain constant. This makes it easier to predict their future behaviour as we expect them to behave similarly. But when the series is not stationary we don't know how it is going to behave in the future. In reality, most of the time series we are going to work with are not stationary. But using various techniques we might be able to transform them into a stationary time series. This is exactly what we just did. We use STL to remove the trend and seasonality to get a stationary time series. # #### Augmented Dickey-Fuller test # # [Augmented Dickey-Fuller test](https://en.wikipedia.org/wiki/Augmented_Dickey%E2%80%93Fuller_test) (ADF) is a statistical test for stationarity. We are not going to discuss the statistical details of this test, but what matters to us is the result. # # The null hpothesis of ADF is: `the series is stationary.` # # Let's test it on our data. # # + from statsmodels.tsa.stattools import adfuller def adf_p_value(data): p = adfuller(data)[1] # If p-value is lower than a threshold (commonly 0.05), if p<0.05: # it means the null hypothesis is rejected and therefore the time series is stationary. return 'stationary (p={:2.2g})'.format(p) else: return 'not stationary (p={:2.2g})'.format(p) # - adf_p_value(df["target"]) # The function returns many values, but the one that we are interested in is p-value, which the second value. If it is less than 0.05, it means time series is stationary. In this case it is far from 0.05 and that is what we expected as the data has clear trend.<br> # Now let's turn it into a function that only return the p-value and run the test on white noise. adf_p_value(random_noise) # The value is very small, which suggests we can reject the null hypothesis and therefore the series is stationary. # ## Decomposing # What if we remove trend and seasonality from the data using STL method? from statsmodels.tsa.seasonal import seasonal_decompose res = seasonal_decompose(df[:100], model="mul") res.plot() '' # If we remove the seasonal and trend component what is left is the residuals.<br> # The residuals might have `NaN` in it. 
If so, we need to remove them before performing the test. adf_p_value(res.resid.dropna().values[:, 0]) # The residual is stationary since the p value is lower than 0.05. df.plot() df.diff().plot() df.diff(2).plot() # Another technique to make a time series stationary is differencing. Differencing means that we calculate the difference between two consecutive points in time. Then we use the differences for forcasting.<br> # Let's see how differencing will affect our data. Pandas has a builtin method for differencing (`.diff()`): df.diff() # We need to get rid of `NaN` so we can run the test. adf_p_value(df.diff().dropna()["target"]) # As we can see p-value is below the 0.05 threshold, which means differencing helped to convert data into stationary time series. <br> # In some cases you might need to perform differencing multiple times to reach stationary results. adf_p_value(df.diff(2).dropna()["target"]) # ## Autocorrelation # Another characteristics of a time series is autocorrelation. Autocorrelation is simply the correlation between the points in the time series and the points before them (sometimes called lagged values). # # The shaded area is the confidence threshold on the correlation using Bartlett's formula $1/\sqrt{N}$ which assumes a guassian distribution. If a correlations is below this threshold is it's likely to be a coincidence. from statsmodels.graphics.tsaplots import plot_acf df.plot() plot_acf(df) plt.xlabel('Lag (day)') plt.ylabel('Correlation coeffecient') '' # The points closer together in time have higher correlation compared to the points further apart. This is an expected behaviour. However, how quickly does the correlation decreases is important. # ## Autoregressive models (AR) # An [autoregressive model](https://en.wikipedia.org/wiki/Autoregressive_model), is a time series model which assumes a linear relationship between each point in time and its past $p$ points. # # $$y_t=c+\sum_{i=1}^{p}\phi_iy_{t-i}$$ # For instance a first order AR (also shown as AR(1)) can be written as:<br> # $$y_t=c+\phi_1 y_{t-1}$$ # This model can be found in statsmodels in ar_model submodule. # This is to avoid some warning messages from statsmodels import warnings warnings.filterwarnings("ignore", category=FutureWarning) from statsmodels.tsa.ar_model import AR, ARResults # Let's try an AR model on our data. # + model = AR(df_train) # Then we train the model specifying the order of AR. Let's start by trying `1`. trained_model = model.fit( maxlag=2, trend='nc', ) # Now the model
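Once an AR model has been fitted as above, the usual next step is to forecast the validation window and score it. A self-contained sketch on synthetic data (not the smart-meter set), using the same deprecated statsmodels AR class the notebook imports:

# Fit an AR(2) model on a synthetic series, forecast the held-out window, score with MAE.
import numpy as np
import pandas as pd
from statsmodels.tsa.ar_model import AR

rng = np.random.default_rng(42)
series = pd.Series(np.sin(np.arange(200) / 5) + rng.normal(0, 0.1, 200))
train, valid = series[:150], series[150:]

# Fit on the training part only, as in the notebook.
fitted = AR(train).fit(maxlag=2)

# Predict the index range covered by the validation window.
forecast = np.asarray(fitted.predict(start=len(train), end=len(series) - 1))

mae = np.mean(np.abs(forecast - valid.values))
print(f"MAE on the validation window: {mae:.3f}")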
CREATE SCALAR TYPE foo EXTENDING str { CREATE CONSTRAINT aaa(10); }; ''') async def test_edgeql_ddl_constraint_03(self): # Test for #1727. Usage of EXISTS in constraints. await self.con.execute(r""" CREATE TYPE TypeCon03 { CREATE PROPERTY name -> str { # emulating "required" CREATE CONSTRAINT expression ON (EXISTS __subject__) } }; """) await self.con.execute(""" INSERT TypeCon03 {name := 'OK'}; """) with self.assertRaisesRegex( edgedb.ConstraintViolationError, r'invalid name'): async with self.con.transaction(): await self.con.execute(""" INSERT TypeCon03; """) @test.xfail(''' EXISTS constraint violation not raised for MULTI property. ''') async def test_edgeql_ddl_constraint_04(self): # Test for #1727. Usage of EXISTS in constraints. await self.con.execute(r""" CREATE TYPE TypeCon04 { CREATE MULTI PROPERTY name -> str { # emulating "required" CREATE CONSTRAINT expression ON (EXISTS __subject__) } }; """) await self.con.execute(""" INSERT TypeCon04 {name := 'OK'}; """) with self.assertRaisesRegex( edgedb.ConstraintViolationError, r'invalid name'): async with self.con.transaction(): await self.con.execute(""" INSERT TypeCon04 {name := {}}; """) with self.assertRaisesRegex( edgedb.ConstraintViolationError, r'invalid name'): async with self.con.transaction(): await self.con.execute(""" INSERT TypeCon04; """) async def test_edgeql_ddl_constraint_05(self): # Test for #1727. Usage of EXISTS in constraints. await self.con.execute(r""" CREATE TYPE Child05; CREATE TYPE TypeCon05 { CREATE LINK child -> Child05 { # emulating "required" CREATE CONSTRAINT expression ON (EXISTS __subject__) } }; """) await self.con.execute(""" INSERT Child05; INSERT TypeCon05 {child := (SELECT Child05 LIMIT 1)}; """) with self.assertRaisesRegex( edgedb.ConstraintViolationError, r'invalid child'): async with self.con.transaction(): await self.con.execute(""" INSERT TypeCon05; """) @test.xfail(''' EXISTS constraint violation not raised for MULTI links. ''') async def test_edgeql_ddl_constraint_06(self): # Test for #1727. Usage of EXISTS in constraints. await self.con.execute(r""" CREATE TYPE Child06; CREATE TYPE TypeCon06 { CREATE MULTI LINK children -> Child06 { # emulating "required" CREATE CONSTRAINT expression ON (EXISTS __subject__) } }; """) await self.con.execute(""" INSERT Child06; INSERT TypeCon06 {children := Child06}; """) with self.assertRaisesRegex( edgedb.ConstraintViolationError, r'invalid children'): async with self.con.transaction(): await self.con.execute(""" INSERT TypeCon06; """) async def test_edgeql_ddl_constraint_07(self): # Test for #1727. Usage of EXISTS in constraints. 
await self.con.execute(r""" CREATE TYPE Child07; CREATE TYPE TypeCon07 { CREATE LINK child -> Child07 { CREATE PROPERTY index -> int64; # emulating "required" CREATE CONSTRAINT expression ON (EXISTS __subject__@index) } }; """) await self.con.execute(""" INSERT Child07; INSERT TypeCon07 { child := (SELECT Child07 LIMIT 1){@index := 0} }; """) with self.assertRaisesRegex( edgedb.ConstraintViolationError, r'invalid child'): async with self.con.transaction(): await self.con.execute(""" INSERT TypeCon07 { child := (SELECT Child07 LIMIT 1) }; """) async def test_edgeql_ddl_constraint_08(self): # Test non-delegated object constraints on abstract types await self.con.execute(r""" CREATE TYPE Base { CREATE PROPERTY x -> str { CREATE CONSTRAINT exclusive; } }; CREATE TYPE Foo EXTENDING Base; CREATE TYPE Bar EXTENDING Base; INSERT Foo { x := "a" }; """) with self.assertRaisesRegex( edgedb.ConstraintViolationError, r'violates exclusivity constraint'): await self.con.execute(r""" INSERT Foo { x := "a" }; """) async def test_edgeql_ddl_constraint_09(self): await self.con.execute(r""" CREATE ABSTRACT TYPE Text { CREATE REQUIRED SINGLE PROPERTY body -> str { CREATE CONSTRAINT max_len_value(10000); }; }; CREATE TYPE Comment EXTENDING Text; """) await self.con.execute(""" ALTER TYPE Text ALTER PROPERTY body DROP CONSTRAINT max_len_value(10000); """) async def test_edgeql_ddl_constraint_10(self): await self.con.execute(r""" CREATE ABSTRACT TYPE Text { CREATE REQUIRED SINGLE PROPERTY body -> str { CREATE CONSTRAINT max_len_value(10000); }; }; CREATE TYPE Comment EXTENDING Text; """) await self.con.execute(""" ALTER TYPE Text DROP PROPERTY body; """) async def test_edgeql_ddl_constraint_11(self): await self.con.execute(r""" CREATE ABSTRACT TYPE Text { CREATE REQUIRED SINGLE PROPERTY body -> str { CREATE CONSTRAINT max_value(10000) ON (len(__subject__)); }; }; CREATE TYPE Comment EXTENDING Text; CREATE TYPE Troll EXTENDING Comment; """) await self.con.execute(""" ALTER TYPE Text ALTER PROPERTY body DROP CONSTRAINT max_value(10000) ON (len(__subject__)); """) async def test_edgeql_ddl_constraint_12(self): with self.assertRaisesRegex( edgedb.errors.SchemaError, r'Constraint .+ is already present in the schema'): await self.con.execute(r""" CREATE TYPE Base { CREATE PROPERTY firstname -> str { CREATE CONSTRAINT max_len_value(10); CREATE CONSTRAINT max_len_value(10); } } """) async def test_edgeql_ddl_constraint_13(self): await self.con.execute(r""" CREATE ABSTRACT CONSTRAINT Lol { USING ((__subject__ < 10)); }; CREATE TYPE Foo { CREATE PROPERTY x -> int64 { CREATE CONSTRAINT Lol; }; }; CREATE TYPE Bar EXTENDING Foo; """) await self.con.execute(r""" ALTER ABSTRACT CONSTRAINT Lol RENAME TO Lolol; """) await self.con.execute(r""" ALTER TYPE Foo DROP PROPERTY x; """) async def test_edgeql_ddl_constraint_alter_01(self): await self.con.execute(r""" CREATE TYPE ConTest01 { CREATE PROPERTY con_test -> int64; }; ALTER TYPE ConTest01 ALTER PROPERTY con_test CREATE CONSTRAINT min_value(0); """) await self.con.execute(""" ALTER TYPE ConTest01 ALTER PROPERTY con_test DROP CONSTRAINT min_value(0); """) await self.assert_query_result(""" WITH MODULE schema SELECT ObjectType { name, properties: { name, constraints: { name } } FILTER .name = 'con_test' } FILTER .name = 'default::ConTest01'; """, [ { 'name': 'default::ConTest01', 'properties': [{ 'name': 'con_test', 'constraints': {}, }] } ]) async def test_edgeql_ddl_constraint_alter_02(self): # Create constraint, then add and drop annotation for it. 
This # is similar to `test_edgeql_ddl_annotation_06`. await self.con.execute(r''' CREATE SCALAR TYPE contest2_t EXTENDING int64 { CREATE CONSTRAINT expression ON (__subject__ > 0); }; ''') await self.con.execute(r''' ALTER SCALAR TYPE contest2_t { ALTER CONSTRAINT expression ON (__subject__ > 0) { CREATE ANNOTATION title := 'my constraint 2' } }; ''') await self.assert_query_result( r''' WITH MODULE schema SELECT ScalarType { constraints: { subjectexpr, annotations: { name, @value, } } } FILTER .name = 'default::contest2_t'; ''', [{ "constraints": [{ "subjectexpr": "(__subject__ > 0)", "annotations": [{ "name": "std::title", "@value": "my constraint 2", }] }] }] ) await self.con.execute(r''' ALTER SCALAR TYPE contest2_t { ALTER CONSTRAINT expression ON (__subject__ > 0) { DROP ANNOTATION title; } }; ''') await self.assert_query_result( r''' WITH MODULE schema SELECT ScalarType { constraints: { subjectexpr, annotations: { name, @value, } } } FILTER .name = 'default::contest2_t'; ''', [{ "constraints": [{ "subjectexpr": "(__subject__ > 0)", "annotations": [] }] }] ) async def test_edgeql_ddl_constraint_alter_03(self): # Create constraint annotation using DDL, then drop annotation # using SDL. This is similar to `test_edgeql_ddl_annotation_07`. await self.con.execute(r''' CREATE SCALAR TYPE contest3_t EXTENDING int64 { CREATE CONSTRAINT expression ON (__subject__ > 0) { CREATE ANNOTATION title := 'my constraint 3'; } }; ''') await self.assert_query_result( r''' WITH MODULE schema SELECT ScalarType { constraints: { subjectexpr, annotations: { name, @value, } } } FILTER .name = 'default::contest3_t'; ''', [{ "constraints": [{ "subjectexpr": "(__subject__ > 0)", "annotations": [{ "name": "std::title", "@value": "my constraint 3", }] }] }] ) await self.migrate(r''' scalar type contest3_t extending int64 { constraint expression on (__subject__ > 0); }; ''') await self.assert_query_result( r''' WITH MODULE schema SELECT ScalarType { constraints: { subjectexpr, annotations: { name, @value, } } } FILTER .name = 'default::contest3_t'; ''', [{ "constraints": [{ "subjectexpr": "(__subject__ > 0)", "annotations": [] }] }] ) async def test_edgeql_ddl_constraint_alter_04(self): # Create constraints using DDL, then add annotation to it # using SDL. This tests how "on expr" is handled. This is # similar to `test_edgeql_ddl_annotation_08`. 
await self.con.execute(r''' CREATE SCALAR TYPE contest4_t EXTENDING int64 { CREATE CONSTRAINT expression ON (__subject__ > 0); }; ''') await self.assert_query_result( r''' WITH MODULE schema SELECT ScalarType { constraints: { subjectexpr, annotations: { name, @value, } } } FILTER .name = 'default::contest4_t'; ''', [{ "constraints": [{ "subjectexpr": "(__subject__ > 0)", "annotations": [] }] }] ) await self.migrate(r''' scalar type contest4_t extending int64 { constraint expression on (__subject__ > 0) { annotation title := 'my constraint 5'; } }; ''') await self.assert_query_result( r''' WITH MODULE schema SELECT ScalarType { constraints: { subjectexpr, annotations: { name, @value, } } } FILTER .name = 'default::contest4_t'; ''', [{ "constraints": [{ "subjectexpr": "(__subject__ > 0)", "annotations": [{ "name": "std::title", "@value": "my constraint 5", }] }] }] ) async def test_edgeql_ddl_constraint_alter_05(self): await self.con.execute(r""" CREATE TYPE Base { CREATE PROPERTY firstname -> str { CREATE CONSTRAINT max_len_value(10); } } """) with self.assertRaisesRegex( edgedb.errors.SchemaError, r'Constraint .+ is already present in the schema'): await self.con.execute(r""" ALTER TYPE Base { ALTER PROPERTY firstname { CREATE CONSTRAINT max_len_value(10); } } """) async def test_edgeql_ddl_drop_inherited_link(self): await self.con.execute(r""" CREATE TYPE Target; CREATE TYPE Parent { CREATE LINK dil_foo -> Target; }; CREATE TYPE Child EXTENDING Parent; CREATE TYPE GrandChild EXTENDING Child; """) await self.con.execute(""" ALTER TYPE Parent DROP LINK dil_foo; """) async def test_edgeql_ddl_drop_01(self): # Check that constraints defined on
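The constraint tests above repeatedly emulate a required property or link with CREATE CONSTRAINT expression ON (EXISTS __subject__). Below is a minimal standalone sketch of that same pattern outside the test harness; the client constructor and close calls (edgedb.create_async_client, aclose) are assumptions about the edgedb-python driver and may differ by version.

import asyncio
import edgedb  # assumed: edgedb-python driver installed

DDL = """
CREATE TYPE DemoCon {
    CREATE PROPERTY name -> str {
        # emulating "required", as in the tests above
        CREATE CONSTRAINT expression ON (EXISTS __subject__)
    }
};
"""

async def main() -> None:
    client = edgedb.create_async_client()  # assumed constructor name; varies by driver version
    try:
        await client.execute(DDL)
        await client.execute("INSERT DemoCon {name := 'OK'};")
        try:
            # Omitting `name` should violate the EXISTS constraint, mirroring the tests.
            await client.execute("INSERT DemoCon;")
        except edgedb.ConstraintViolationError as err:
            print("constraint violation, as expected:", err)
    finally:
        await client.aclose()  # assumed close method

if __name__ == "__main__":
    asyncio.run(main())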
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from ._inputs import * __all__ = ['ExportArgs', 'Export'] @pulumi.input_type class ExportArgs: def __init__(__self__, *, datastore_name: pulumi.Input[str], date_range: pulumi.Input['GoogleCloudApigeeV1DateRangeArgs'], environment_id: pulumi.Input[str], organization_id: pulumi.Input[str], csv_delimiter: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, output_format: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a Export resource. :param pulumi.Input[str] datastore_name: Name of the preconfigured datastore. :param pulumi.Input['GoogleCloudApigeeV1DateRangeArgs'] date_range: Date range of the data to export. :param pulumi.Input[str] csv_delimiter: Optional. Delimiter used in the CSV file, if `outputFormat` is set to `csv`. Defaults to the `,` (comma) character. Supported delimiter characters include comma (`,`), pipe (`|`), and tab (`\t`). :param pulumi.Input[str] description: Optional. Description of the export job. :param pulumi.Input[str] name: Display name of the export job. :param pulumi.Input[str] output_format: Optional. Output format of the export. Valid values include: `csv` or `json`. Defaults to `json`. Note: Configure the delimiter for CSV output using the `csvDelimiter` property. """ pulumi.set(__self__, "datastore_name", datastore_name) pulumi.set(__self__, "date_range", date_range) pulumi.set(__self__, "environment_id", environment_id) pulumi.set(__self__, "organization_id", organization_id) if csv_delimiter is not None: pulumi.set(__self__, "csv_delimiter", csv_delimiter) if description is not None: pulumi.set(__self__, "description", description) if name is not None: pulumi.set(__self__, "name", name) if output_format is not None: pulumi.set(__self__, "output_format", output_format) @property @pulumi.getter(name="datastoreName") def datastore_name(self) -> pulumi.Input[str]: """ Name of the preconfigured datastore. """ return pulumi.get(self, "datastore_name") @datastore_name.setter def datastore_name(self, value: pulumi.Input[str]): pulumi.set(self, "datastore_name", value) @property @pulumi.getter(name="dateRange") def date_range(self) -> pulumi.Input['GoogleCloudApigeeV1DateRangeArgs']: """ Date range of the data to export. """ return pulumi.get(self, "date_range") @date_range.setter def date_range(self, value: pulumi.Input['GoogleCloudApigeeV1DateRangeArgs']): pulumi.set(self, "date_range", value) @property @pulumi.getter(name="environmentId") def environment_id(self) -> pulumi.Input[str]: return pulumi.get(self, "environment_id") @environment_id.setter def environment_id(self, value: pulumi.Input[str]): pulumi.set(self, "environment_id", value) @property @pulumi.getter(name="organizationId") def organization_id(self) -> pulumi.Input[str]: return pulumi.get(self, "organization_id") @organization_id.setter def organization_id(self, value: pulumi.Input[str]): pulumi.set(self, "organization_id", value) @property @pulumi.getter(name="csvDelimiter") def csv_delimiter(self) -> Optional[pulumi.Input[str]]: """ Optional. Delimiter used in the CSV file, if `outputFormat` is set to `csv`. Defaults to the `,` (comma) character. 
Supported delimiter characters include comma (`,`), pipe (`|`), and tab (`\t`). """ return pulumi.get(self, "csv_delimiter") @csv_delimiter.setter def csv_delimiter(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "csv_delimiter", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ Optional. Description of the export job. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Display name of the export job. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="outputFormat") def output_format(self) -> Optional[pulumi.Input[str]]: """ Optional. Output format of the export. Valid values include: `csv` or `json`. Defaults to `json`. Note: Configure the delimiter for CSV output using the `csvDelimiter` property. """ return pulumi.get(self, "output_format") @output_format.setter def output_format(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "output_format", value) class Export(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, csv_delimiter: Optional[pulumi.Input[str]] = None, datastore_name: Optional[pulumi.Input[str]] = None, date_range: Optional[pulumi.Input[pulumi.InputType['GoogleCloudApigeeV1DateRangeArgs']]] = None, description: Optional[pulumi.Input[str]] = None, environment_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, organization_id: Optional[pulumi.Input[str]] = None, output_format: Optional[pulumi.Input[str]] = None, __props__=None): """ Submit a data export job to be processed in the background. If the request is successful, the API returns a 201 status, a URI that can be used to retrieve the status of the export job, and the `state` value of "enqueued". Note - this resource's API doesn't support deletion. When deleted, the resource will persist on Google Cloud even though it will be deleted from Pulumi state. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] csv_delimiter: Optional. Delimiter used in the CSV file, if `outputFormat` is set to `csv`. Defaults to the `,` (comma) character. Supported delimiter characters include comma (`,`), pipe (`|`), and tab (`\t`). :param pulumi.Input[str] datastore_name: Name of the preconfigured datastore. :param pulumi.Input[pulumi.InputType['GoogleCloudApigeeV1DateRangeArgs']] date_range: Date range of the data to export. :param pulumi.Input[str] description: Optional. Description of the export job. :param pulumi.Input[str] name: Display name of the export job. :param pulumi.Input[str] output_format: Optional. Output format of the export. Valid values include: `csv` or `json`. Defaults to `json`. Note: Configure the delimiter for CSV output using the `csvDelimiter` property. """ ... @overload def __init__(__self__, resource_name: str, args: ExportArgs, opts: Optional[pulumi.ResourceOptions] = None): """ Submit a data export job to be processed in the background. If the request is successful, the API returns a 201 status, a URI that can be used to retrieve the status of the export job, and the `state` value of "enqueued". Note - this resource's API doesn't support deletion. 
When deleted, the resource will persist on Google Cloud even though it will be deleted from Pulumi state. :param str resource_name: The name of the resource. :param ExportArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(ExportArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, csv_delimiter: Optional[pulumi.Input[str]] = None, datastore_name: Optional[pulumi.Input[str]] = None, date_range: Optional[pulumi.Input[pulumi.InputType['GoogleCloudApigeeV1DateRangeArgs']]] = None, description: Optional[pulumi.Input[str]] = None, environment_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, organization_id: Optional[pulumi.Input[str]] = None, output_format: Optional[pulumi.Input[str]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = ExportArgs.__new__(ExportArgs) __props__.__dict__["csv_delimiter"] = csv_delimiter if datastore_name is None and not opts.urn: raise TypeError("Missing required property 'datastore_name'") __props__.__dict__["datastore_name"] = datastore_name if date_range is None and not opts.urn: raise TypeError("Missing required property 'date_range'") __props__.__dict__["date_range"] = date_range __props__.__dict__["description"] = description if environment_id is None and not opts.urn: raise TypeError("Missing required property 'environment_id'") __props__.__dict__["environment_id"] = environment_id __props__.__dict__["name"] = name if organization_id is None and not opts.urn: raise TypeError("Missing required property 'organization_id'") __props__.__dict__["organization_id"] = organization_id __props__.__dict__["output_format"] = output_format __props__.__dict__["created"] = None __props__.__dict__["error"] = None __props__.__dict__["execution_time"] = None __props__.__dict__["self"] = None __props__.__dict__["state"] = None __props__.__dict__["updated"] = None super(Export, __self__).__init__( 'google-native:apigee/v1:Export', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'Export': """ Get an existing Export resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. 
""" opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = ExportArgs.__new__(ExportArgs) __props__.__dict__["created"] = None __props__.__dict__["datastore_name"] = None __props__.__dict__["description"] = None __props__.__dict__["error"] = None __props__.__dict__["execution_time"] = None __props__.__dict__["name"] = None __props__.__dict__["self"] = None __props__.__dict__["state"] = None __props__.__dict__["updated"] = None return Export(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def created(self) -> pulumi.Output[str]: """ Time the export job was created. """ return pulumi.get(self, "created") @property @pulumi.getter(name="datastoreName") def datastore_name(self) -> pulumi.Output[str]: """ Name of the datastore that is the destination of the export job [datastore] """ return pulumi.get(self, "datastore_name") @property @pulumi.getter def description(self) -> pulumi.Output[str]: """ Description of the export job. """ return pulumi.get(self, "description") @property @pulumi.getter def error(self) -> pulumi.Output[str]: """ Error is set when export fails """ return pulumi.get(self, "error") @property @pulumi.getter(name="executionTime") def execution_time(self) -> pulumi.Output[str]: """ Execution time for this export job. If the job is still in progress, it will be set to the amount of time that has elapsed since`created`, in seconds. Else, it will set to (`updated` - `created`), in seconds. """ return pulumi.get(self, "execution_time") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ Display name of the export job. """ return pulumi.get(self, "name") @property @pulumi.getter def self(self) -> pulumi.Output[str]: """ Self link of the export job. A URI that can be used to retrieve the status of an
LCID, 4, 0),()), "Companies": ((34107, LCID, 4, 0),()), "Importance": ((23, LCID, 4, 0),()), "MessageClass": ((26, LCID, 4, 0),()), "Mileage": ((34100, LCID, 4, 0),()), "NoAging": ((34062, LCID, 4, 0),()), "Sensitivity": ((54, LCID, 4, 0),()), "Subject": ((55, LCID, 4, 0),()), "UnRead": ((61468, LCID, 4, 0),()), } class _ReportItem(DispatchBaseClass): CLSID = IID('{00063026-0000-0000-C000-000000000046}') coclass_clsid = IID('{00061035-0000-0000-C000-000000000046}') def Close(self, SaveMode=defaultNamedNotOptArg): return self._oleobj_.InvokeTypes(61475, LCID, 1, (24, 0), ((3, 1),),SaveMode ) def Copy(self): ret = self._oleobj_.InvokeTypes(61490, LCID, 1, (9, 0), (),) if ret is not None: ret = Dispatch(ret, 'Copy', None) return ret def Delete(self): return self._oleobj_.InvokeTypes(61514, LCID, 1, (24, 0), (),) def Display(self, Modal=defaultNamedOptArg): return self._oleobj_.InvokeTypes(61606, LCID, 1, (24, 0), ((12, 17),),Modal ) def Move(self, DestFldr=defaultNamedNotOptArg): ret = self._oleobj_.InvokeTypes(61492, LCID, 1, (9, 0), ((9, 1),),DestFldr ) if ret is not None: ret = Dispatch(ret, 'Move', None) return ret def PrintOut(self): return self._oleobj_.InvokeTypes(61491, LCID, 1, (24, 0), (),) def Save(self): return self._oleobj_.InvokeTypes(61512, LCID, 1, (24, 0), (),) def SaveAs(self, Path=defaultNamedNotOptArg, Type=defaultNamedOptArg): return self._oleobj_.InvokeTypes(61521, LCID, 1, (24, 0), ((8, 1), (12, 17)),Path , Type) _prop_map_get_ = { # Method 'Actions' returns object of type 'Actions' "Actions": (63511, 2, (9, 0), (), "Actions", '{0006303E-0000-0000-C000-000000000046}'), # Method 'Application' returns object of type '_Application' "Application": (61440, 2, (9, 0), (), "Application", '{00063001-0000-0000-C000-000000000046}'), # Method 'Attachments' returns object of type 'Attachments' "Attachments": (63509, 2, (9, 0), (), "Attachments", '{0006303C-0000-0000-C000-000000000046}'), "BillingInformation": (34101, 2, (8, 0), (), "BillingInformation", None), "Body": (37120, 2, (8, 0), (), "Body", None), "Categories": (36865, 2, (8, 0), (), "Categories", None), "Class": (61450, 2, (3, 0), (), "Class", None), "Companies": (34107, 2, (8, 0), (), "Companies", None), "ConversationIndex": (113, 2, (8, 0), (), "ConversationIndex", None), "ConversationTopic": (112, 2, (8, 0), (), "ConversationTopic", None), "CreationTime": (12295, 2, (7, 0), (), "CreationTime", None), "EntryID": (61470, 2, (8, 0), (), "EntryID", None), # Method 'FormDescription' returns object of type 'FormDescription' "FormDescription": (61589, 2, (9, 0), (), "FormDescription", '{00063046-0000-0000-C000-000000000046}'), # Method 'GetInspector' returns object of type '_Inspector' "GetInspector": (61502, 2, (9, 0), (), "GetInspector", '{00063005-0000-0000-C000-000000000046}'), "Importance": (23, 2, (3, 0), (), "Importance", None), "LastModificationTime": (12296, 2, (7, 0), (), "LastModificationTime", None), # Method 'Links' returns object of type 'Links' "Links": (62469, 2, (9, 0), (), "Links", '{0006308A-0000-0000-C000-000000000046}'), "MAPIOBJECT": (61696, 2, (13, 0), (), "MAPIOBJECT", None), "MessageClass": (26, 2, (8, 0), (), "MessageClass", None), "Mileage": (34100, 2, (8, 0), (), "Mileage", None), "NoAging": (34062, 2, (11, 0), (), "NoAging", None), "OutlookInternalVersion": (34130, 2, (3, 0), (), "OutlookInternalVersion", None), "OutlookVersion": (34132, 2, (8, 0), (), "OutlookVersion", None), "Parent": (61441, 2, (9, 0), (), "Parent", None), "Saved": (61603, 2, (11, 0), (), "Saved", None), "Sensitivity": (54, 2, 
(3, 0), (), "Sensitivity", None), # Method 'Session' returns object of type '_NameSpace' "Session": (61451, 2, (9, 0), (), "Session", '{00063002-0000-0000-C000-000000000046}'), "Size": (3592, 2, (3, 0), (), "Size", None), "Subject": (55, 2, (8, 0), (), "Subject", None), "UnRead": (61468, 2, (11, 0), (), "UnRead", None), # Method 'UserProperties' returns object of type 'UserProperties' "UserProperties": (63510, 2, (9, 0), (), "UserProperties", '{0006303D-0000-0000-C000-000000000046}'), } _prop_map_put_ = { "BillingInformation": ((34101, LCID, 4, 0),()), "Body": ((37120, LCID, 4, 0),()), "Categories": ((36865, LCID, 4, 0),()), "Companies": ((34107, LCID, 4, 0),()), "Importance": ((23, LCID, 4, 0),()), "MessageClass": ((26, LCID, 4, 0),()), "Mileage": ((34100, LCID, 4, 0),()), "NoAging": ((34062, LCID, 4, 0),()), "Sensitivity": ((54, LCID, 4, 0),()), "Subject": ((55, LCID, 4, 0),()), "UnRead": ((61468, LCID, 4, 0),()), } class _SyncObject(DispatchBaseClass): CLSID = IID('{00063083-0000-0000-C000-000000000046}') coclass_clsid = IID('{00063084-0000-0000-C000-000000000046}') def Start(self): return self._oleobj_.InvokeTypes(8449, LCID, 1, (24, 0), (),) def Stop(self): return self._oleobj_.InvokeTypes(8450, LCID, 1, (24, 0), (),) _prop_map_get_ = { # Method 'Application' returns object of type '_Application' "Application": (61440, 2, (9, 0), (), "Application", '{00063001-0000-0000-C000-000000000046}'), "Class": (61450, 2, (3, 0), (), "Class", None), "Name": (8448, 2, (8, 0), (), "Name", None), "Parent": (61441, 2, (9, 0), (), "Parent", None), # Method 'Session' returns object of type '_NameSpace' "Session": (61451, 2, (9, 0), (), "Session", '{00063002-0000-0000-C000-000000000046}'), } _prop_map_put_ = { } class _TaskItem(DispatchBaseClass): CLSID = IID('{00063035-0000-0000-C000-000000000046}') coclass_clsid = IID('{00061032-0000-0000-C000-000000000046}') # Result is of type TaskItem def Assign(self): ret = self._oleobj_.InvokeTypes(63008, LCID, 1, (13, 0), (),) if ret is not None: # See if this IUnknown is really an IDispatch try: ret = ret.QueryInterface(pythoncom.IID_IDispatch) except pythoncom.error: return ret ret = Dispatch(ret, 'Assign', '{00061032-0000-0000-C000-000000000046}') return ret def CancelResponseState(self): return self._oleobj_.InvokeTypes(63010, LCID, 1, (24, 0), (),) def ClearRecurrencePattern(self): return self._oleobj_.InvokeTypes(61605, LCID, 1, (24, 0), (),) def Close(self, SaveMode=defaultNamedNotOptArg): return self._oleobj_.InvokeTypes(61475, LCID, 1, (24, 0), ((3, 1),),SaveMode ) def Copy(self): ret = self._oleobj_.InvokeTypes(61490, LCID, 1, (9, 0), (),) if ret is not None: ret = Dispatch(ret, 'Copy', None) return ret def Delete(self): return self._oleobj_.InvokeTypes(61514, LCID, 1, (24, 0), (),) def Display(self, Modal=defaultNamedOptArg): return self._oleobj_.InvokeTypes(61606, LCID, 1, (24, 0), ((12, 17),),Modal ) # Result is of type RecurrencePattern def GetRecurrencePattern(self): ret = self._oleobj_.InvokeTypes(61604, LCID, 1, (9, 0), (),) if ret is not None: ret = Dispatch(ret, 'GetRecurrencePattern', '{00063044-0000-0000-C000-000000000046}') return ret def MarkComplete(self): return self._oleobj_.InvokeTypes(62989, LCID, 1, (24, 0), (),) def Move(self, DestFldr=defaultNamedNotOptArg): ret = self._oleobj_.InvokeTypes(61492, LCID, 1, (9, 0), ((9, 1),),DestFldr ) if ret is not None: ret = Dispatch(ret, 'Move', None) return ret def PrintOut(self): return self._oleobj_.InvokeTypes(61491, LCID, 1, (24, 0), (),) # Result is of type TaskItem def Respond(self, 
Response=defaultNamedNotOptArg, fNoUI=defaultNamedNotOptArg, fAdditionalTextDialog=defaultNamedNotOptArg): ret = self._oleobj_.InvokeTypes(63009, LCID, 1, (13, 0), ((3, 1), (12, 1), (12, 1)),Response , fNoUI, fAdditionalTextDialog) if ret is not None: # See if this IUnknown is really an IDispatch try: ret = ret.QueryInterface(pythoncom.IID_IDispatch) except pythoncom.error: return ret ret = Dispatch(ret, 'Respond', '{00061032-0000-0000-C000-000000000046}') return ret def Save(self): return self._oleobj_.InvokeTypes(61512, LCID, 1, (24, 0), (),) def SaveAs(self, Path=defaultNamedNotOptArg, Type=defaultNamedOptArg): return self._oleobj_.InvokeTypes(61521, LCID, 1, (24, 0), ((8, 1), (12, 17)),Path , Type) def Send(self): return self._oleobj_.InvokeTypes(61557, LCID, 1, (24, 0), (),) def SkipRecurrence(self): return self._oleobj_.InvokeTypes(63012, LCID, 1, (11, 0), (),) def StatusReport(self): ret = self._oleobj_.InvokeTypes(62994, LCID, 1, (9, 0), (),) if ret is not None: ret = Dispatch(ret, 'StatusReport', None) return ret _prop_map_get_ = { # Method 'Actions' returns object of type 'Actions' "Actions": (63511, 2, (9, 0), (), "Actions", '{0006303E-0000-0000-C000-000000000046}'), "ActualWork": (33040, 2, (3, 0), (), "ActualWork", None), # Method 'Application' returns object of type '_Application' "Application": (61440, 2, (9, 0), (), "Application", '{00063001-0000-0000-C000-000000000046}'), # Method 'Attachments' returns object of type 'Attachments' "Attachments": (63509, 2, (9, 0), (), "Attachments", '{0006303C-0000-0000-C000-000000000046}'), "BillingInformation": (34101, 2, (8, 0), (), "BillingInformation", None), "Body": (37120, 2, (8, 0), (), "Body", None), "CardData": (33067, 2, (8, 0), (), "CardData", None), "Categories": (36865, 2, (8, 0), (), "Categories", None), "Class": (61450, 2, (3, 0), (), "Class", None), "Companies": (34107, 2, (8, 0), (), "Companies", None), "Complete": (33052, 2, (11, 0), (), "Complete", None), "ContactNames": (34108, 2, (8, 0), (), "ContactNames", None), "Contacts": (34106, 2, (8, 0), (), "Contacts", None), "ConversationIndex": (113, 2, (8, 0), (), "ConversationIndex", None), "ConversationTopic": (112, 2, (8, 0), (), "ConversationTopic", None), "CreationTime": (12295, 2, (7, 0), (), "CreationTime", None), "DateCompleted": (33039, 2, (7, 0), (), "DateCompleted", None), "DelegationState": (33066, 2, (3, 0), (), "DelegationState", None), "Delegator": (33057, 2, (8, 0), (), "Delegator", None), "DueDate": (33029, 2, (7, 0), (), "DueDate", None), "EntryID": (61470, 2, (8, 0), (), "EntryID", None), # Method 'FormDescription' returns object of type 'FormDescription' "FormDescription": (61589, 2, (9, 0), (), "FormDescription", '{00063046-0000-0000-C000-000000000046}'), # Method 'GetInspector' returns object of type '_Inspector' "GetInspector": (61502, 2, (9, 0), (), "GetInspector", '{00063005-0000-0000-C000-000000000046}'), "Importance": (23, 2, (3, 0), (), "Importance", None), "IsRecurring": (62999, 2, (11, 0), (), "IsRecurring", None), "LastModificationTime": (12296, 2, (7, 0), (), "LastModificationTime", None), # Method 'Links' returns object of type 'Links' "Links": (62469, 2, (9, 0), (), "Links", '{0006308A-0000-0000-C000-000000000046}'), "MAPIOBJECT": (61696, 2, (13, 0), (), "MAPIOBJECT", None), "MessageClass": (26, 2, (8, 0), (), "MessageClass", None), "Mileage": (34100, 2, (8, 0), (), "Mileage", None), "NoAging": (34062, 2, (11, 0), (), "NoAging", None), "Ordinal": (33059, 2, (3, 0), (), "Ordinal", None), "OutlookInternalVersion": (34130, 2, (3, 0), (), 
"OutlookInternalVersion", None), "OutlookVersion": (34132, 2, (8, 0), (), "OutlookVersion", None), "Owner": (33055, 2, (8, 0), (), "Owner", None), "Ownership": (33065, 2, (3, 0), (), "Ownership", None), "Parent": (61441, 2, (9, 0), (), "Parent", None), "PercentComplete": (63007, 2, (3, 0), (), "PercentComplete", None), # Method 'Recipients' returns object of type 'Recipients' "Recipients": (63508, 2, (9, 0), (), "Recipients", '{0006303B-0000-0000-C000-000000000046}'), "ReminderOverrideDefault": (34076, 2, (11, 0), (), "ReminderOverrideDefault", None), "ReminderPlaySound": (34078, 2, (11, 0), (), "ReminderPlaySound", None), "ReminderSet": (34051, 2, (11, 0), (), "ReminderSet", None), "ReminderSoundFile": (34079, 2, (8, 0), (), "ReminderSoundFile", None), "ReminderTime": (34050, 2, (7, 0), (), "ReminderTime", None), "ResponseState": (63011, 2, (3, 0), (), "ResponseState", None), "Role": (33063, 2, (8, 0), (), "Role", None), "Saved": (61603, 2, (11, 0), (), "Saved", None), "SchedulePlusPriority": (33071, 2, (8, 0), (), "SchedulePlusPriority", None), "Sensitivity": (54, 2, (3, 0), (), "Sensitivity", None), # Method 'Session' returns object of type '_NameSpace' "Session": (61451, 2, (9, 0), (), "Session", '{00063002-0000-0000-C000-000000000046}'), "Size": (3592, 2, (3, 0), (), "Size", None), "StartDate": (33028, 2, (7, 0), (), "StartDate", None), "Status": (33025, 2, (3, 0), (), "Status", None), "StatusOnCompletionRecipients": (3586, 2, (8, 0), (), "StatusOnCompletionRecipients", None), "StatusUpdateRecipients": (3587, 2, (8, 0), (), "StatusUpdateRecipients", None), "Subject": (55, 2, (8, 0), (), "Subject", None), "TeamTask": (33027, 2, (11, 0), (), "TeamTask", None), "TotalWork": (33041, 2, (3, 0), (), "TotalWork", None), "UnRead": (61468, 2, (11, 0), (), "UnRead", None), # Method 'UserProperties' returns object of type 'UserProperties' "UserProperties": (63510, 2, (9, 0), (), "UserProperties", '{0006303D-0000-0000-C000-000000000046}'), } _prop_map_put_ = { "ActualWork": ((33040, LCID, 4, 0),()), "BillingInformation": ((34101, LCID, 4, 0),()), "Body": ((37120, LCID, 4, 0),()), "CardData": ((33067, LCID, 4, 0),()), "Categories": ((36865, LCID, 4, 0),()), "Companies": ((34107, LCID, 4, 0),()), "Complete": ((33052, LCID, 4,
# verify a false deprecated option. assert config.use_multiprocess_workers is False def test_deprecated_env_aware_params(self): os_environ_unicode["SCALYR_DEFAULT_WORKERS_PER_API_KEY"] = "5" self._write_file_with_separator_conversion( """{ api_key: "hi there", } """ ) config = self._create_test_configuration_instance() config.parse() assert config.default_sessions_per_worker == 5 def test_deprecated_params_in_config_fragment(self): self._write_file_with_separator_conversion( """ { api_key: "hi there" api_keys: [ {"api_key": "key2", "id": "second_key"}, {"api_key": "key3", "id": "third_key", "workers": 3} ] } """ ) self._write_config_fragment_file_with_separator_conversion( "a.json", """ { api_keys: [ {"api_key": "key4", "id": "fourth_key", "workers": 3} ] logs: [ {"path": "some/path", "worker_id": "second_key"} ] } """, ) config = self._create_test_configuration_instance() config.parse() # check if workers from fragment are added. assert list(config.worker_configs) == [ JsonObject( api_key=config.api_key, id="default", sessions=config.default_sessions_per_worker, ), JsonObject( api_key="key2", id="second_key", sessions=config.default_sessions_per_worker, ), JsonObject(api_key="key3", id="third_key", sessions=3), JsonObject(api_key="key4", id="fourth_key", sessions=3), ] def test__verify_required_attributes(self): config = self._create_test_configuration_instance() # 1. Field is not a JSON object config_object = JsonObject({"field1": "a"}) field = "field1" self.assertRaisesRegexp( BadConfiguration, "is not a json object", config._Configuration__verify_required_attributes, config_object=config_object, field=field, config_description="", ) # 2. Field is not a JSON object config_object = JsonObject({"field1": "a"}) field = "field2" config._Configuration__verify_required_attributes( config_object=config_object, field=field, config_description="" ) # 3. Field is an object config_object = JsonObject({"field1": JsonObject({})}) field = "field1" config._Configuration__verify_required_attributes( config_object=config_object, field=field, config_description="" ) # 4. 
Field is an object, one field value can't be cast to string config_object = JsonObject({"field1": JsonObject({"foo": JsonArray([])})}) field = "field1" self.assertRaisesRegexp( BadConfiguration, "is not a string", config._Configuration__verify_required_attributes, config_object=config_object, field=field, config_description="", ) class TestParseArrayOfStrings(TestConfigurationBase): def test_none(self): self.assertIsNone(parse_array_of_strings(None)) def test_empty_string(self): self.assertEqual(parse_array_of_strings(""), ArrayOfStrings()) def test_list(self): self.assertEqual( parse_array_of_strings("a, b, c"), ArrayOfStrings(["a", "b", "c"]) ) class TestConvertConfigParam(TestConfigurationBase): def test_none_to_anything(self): """ """ self.assertRaises( BadConfiguration, lambda: convert_config_param("dummy_field", None, six.text_type), ) self.assertRaises( BadConfiguration, lambda: convert_config_param("dummy_field", None, bool) ) self.assertRaises( BadConfiguration, lambda: convert_config_param("dummy_field", None, int) ) self.assertRaises( BadConfiguration, lambda: convert_config_param("dummy_field", None, float) ) self.assertRaises( BadConfiguration, lambda: convert_config_param("dummy_field", None, list) ) self.assertRaises( BadConfiguration, lambda: convert_config_param("dummy_field", None, JsonArray), ) self.assertRaises( BadConfiguration, lambda: convert_config_param("dummy_field", None, JsonObject), ) self.assertRaises( BadConfiguration, lambda: convert_config_param("dummy_field", None, ArrayOfStrings), ) def test_empty_string(self): self.assertEqual("", convert_config_param("dummy_field", "", six.text_type)) self.assertEqual(False, convert_config_param("dummy_field", "", bool)) self.assertRaises( BadConfiguration, lambda: convert_config_param("dummy_field", "", int) ) self.assertRaises( BadConfiguration, lambda: convert_config_param("dummy_field", "", float) ) self.assertEqual( ArrayOfStrings(), convert_config_param("dummy_field", "", ArrayOfStrings) ) self.assertEqual( ArrayOfStrings(), convert_config_param( "dummy_field", "", SpaceAndCommaSeparatedArrayOfStrings ), ) self.assertRaises( IndexError, lambda: convert_config_param("dummy_field", "", JsonArray) ) self.assertRaises( IndexError, lambda: convert_config_param("dummy_field", "", JsonArray) ) def test_convert_to_float(self): self.assertEqual(5.0, convert_config_param("dummy_field", "5.0", float)) self.assertEqual(5.0, convert_config_param("dummy_field", 5.0, float)) self.assertEqual(5.0, convert_config_param("dummy_field", 5, float)) self.assertEqual(2.1, convert_config_param("dummy_field", "2.1", float)) self.assertEqual(2.1, convert_config_param("dummy_field", 2.1, float)) class TestGetConfigFromEnv(TestConfigurationBase): def test_get_empty_array_of_string(self): os.environ["SCALYR_K8S_IGNORE_NAMESPACES"] = "" self.assertEqual( ArrayOfStrings(), get_config_from_env( "k8s_ignore_namespaces", convert_to=SpaceAndCommaSeparatedArrayOfStrings ), ) os.environ["SCALYR_K8S_IGNORE_NAMESPACES"] = "a, b, c" self.assertEqual( ArrayOfStrings(["a", "b", "c"]), get_config_from_env( "k8s_ignore_namespaces", convert_to=SpaceAndCommaSeparatedArrayOfStrings ), ) del os.environ["SCALYR_K8S_IGNORE_NAMESPACES"] self.assertIsNone( get_config_from_env( "k8s_ignore_namespaces", convert_to=SpaceAndCommaSeparatedArrayOfStrings ) ) def test_get_empty_string(self): os.environ["SCALYR_K8S_API_URL"] = "" self.assertEqual( "", get_config_from_env("k8s_api_url", convert_to=six.text_type) ) del os.environ["SCALYR_K8S_API_URL"] 
self.assertIsNone(get_config_from_env("k8s_api_url", convert_to=six.text_type)) def test_get_empty_json_object(self): os.environ["SCALYR_SERVER_ATTRIBUTES"] = "" self.assertEqual( JsonObject(content={}), get_config_from_env("server_attributes", convert_to=JsonObject), ) del os.environ["SCALYR_SERVER_ATTRIBUTES"] self.assertEqual( None, get_config_from_env("server_attributes", convert_to=JsonObject), ) os.environ["SCALYR_SERVER_ATTRIBUTES"] = '{"serverHost": "foo1.example.com"}' self.assertEqual( JsonObject(content={"serverHost": "foo1.example.com"}), get_config_from_env("server_attributes", convert_to=JsonObject), ) os.environ[ "SCALYR_SERVER_ATTRIBUTES" ] = '{"serverHost": "foo1.example.com", "tier": "foo"}' self.assertEqual( JsonObject(content={"serverHost": "foo1.example.com", "tier": "foo"}), get_config_from_env("server_attributes", convert_to=JsonObject), ) os.environ[ "SCALYR_SERVER_ATTRIBUTES" ] = '{"serverHost": "foo1.example.com", "tier": "foo", "bar": "baz"}' self.assertEqual( JsonObject( content={"serverHost": "foo1.example.com", "tier": "foo", "bar": "baz"} ), get_config_from_env("server_attributes", convert_to=JsonObject), ) class FakeLogWatcher: def add_log_config(self, a, b): pass class TestJournaldLogConfigManager(TestConfigurationBase): def setUp(self): super(TestJournaldLogConfigManager, self).setUp() self._temp_dir = tempfile.mkdtemp() self._log_dir = os.path.join(self._temp_dir, "log") os.makedirs(self._log_dir) def get_configuration(self): default_paths = DefaultPaths( self.convert_path(self._log_dir), self.convert_path("/etc/scalyr-agent-2/agent.json"), self.convert_path("/var/lib/scalyr-agent-2"), ) return Configuration(self._config_file, default_paths, None) def get_configuration_with_logger(self): default_paths = DefaultPaths( self.convert_path(self._log_dir), self.convert_path("/etc/scalyr-agent-2/agent.json"), self.convert_path("/var/lib/scalyr-agent-2"), ) return Configuration( self._config_file, default_paths, scalyr_logging.AgentLogger("config_test") ) def test_default_config(self): self._write_file_with_separator_conversion( """ { api_key: "hi", journald_logs: [ ] } """ ) config = self.get_configuration() config.parse() lcm = LogConfigManager(config, None) matched_config = lcm.get_config("test") self.assertEqual("journald", matched_config["parser"]) matched_config = lcm.get_config("other_test") self.assertEqual("journald", matched_config["parser"]) def test_catchall_config(self): self._write_file_with_separator_conversion( """ { api_key: "hi", journald_logs: [ { journald_unit: ".*", parser: "TestParser" } ] } """ ) config = self.get_configuration() config.parse() lcm = LogConfigManager(config, None) matched_config = lcm.get_config("test") self.assertEqual("TestParser", matched_config["parser"]) matched_config = lcm.get_config("other_test") self.assertEqual("TestParser", matched_config["parser"]) def test_specific_config(self): self._write_file_with_separator_conversion( """ { api_key: "hi", journald_logs: [ { journald_unit: "test", parser: "TestParser" } ] } """ ) config = self.get_configuration() config.parse() lcm = LogConfigManager(config, None) matched_config = lcm.get_config("test") self.assertEqual("TestParser", matched_config["parser"]) matched_config = lcm.get_config("other_test") self.assertEqual("journald", matched_config["parser"]) def test_multiple_configs(self): self._write_file_with_separator_conversion( """ { api_key: "hi", journald_logs: [ { journald_unit: "test", parser: "TestParser" }, { journald_unit: "confirm", parser: "ConfirmParser" } ] 
} """ ) config = self.get_configuration() config.parse() lcm = LogConfigManager(config, None) matched_config = lcm.get_config("test") self.assertEqual("TestParser", matched_config["parser"]) matched_config = lcm.get_config("other_test") self.assertEqual("journald", matched_config["parser"]) matched_config = lcm.get_config("confirm") self.assertEqual("ConfirmParser", matched_config["parser"]) def test_regex_config(self): self._write_file_with_separator_conversion( """ { api_key: "hi", journald_logs: [ { journald_unit: "test.*test", parser: "TestParser" } ] } """ ) config = self.get_configuration() config.parse() lcm = LogConfigManager(config, None) matched_config = lcm.get_config("testtest") self.assertEqual("TestParser", matched_config["parser"]) matched_config = lcm.get_config("other_test") self.assertEqual("journald", matched_config["parser"]) matched_config = lcm.get_config("test_somethingarbitrary:test") self.assertEqual("TestParser", matched_config["parser"]) def test_glob_config(self): self._write_file_with_separator_conversion( """ { api_key: "hi", journald_logs: [ { journald_globs: { "unit": "test*test" }, parser: "TestParser" } ] } """ ) config = self.get_configuration() config.parse() lcm = LogConfigManager(config, None) matched_config = lcm.get_config({"unit": "testtest"}) self.assertEqual("TestParser", matched_config["parser"]) matched_config = lcm.get_config({"unit": "other_test"}) self.assertEqual("journald", matched_config["parser"]) matched_config = lcm.get_config({"unit": "test_somethingarbitrary:test"}) self.assertEqual("TestParser", matched_config["parser"]) def test_multiple_glob_config(self): self._write_file_with_separator_conversion( """ { api_key: "hi", journald_logs: [ { journald_globs: { "unit": "test*test", "container": "f?obar" }, parser: "TestParser" } ] } """ ) config = self.get_configuration() config.parse() lcm = LogConfigManager(config, None) # test matches both matched_config = lcm.get_config({"unit": "testtest", "container": "frobar"}) self.assertEqual("TestParser", matched_config["parser"]) # test when matches one glob but not the other matched_config = lcm.get_config({"unit": "testtest", "container": "foobaz"}) self.assertNotEqual("TestParser", matched_config["parser"]) self.assertEqual("journald", matched_config["parser"]) # test when matches one glob, but other one missing matched_config = lcm.get_config({"unit": "test_other_test"}) self.assertNotEqual("TestParser", matched_config["parser"]) self.assertEqual("journald", matched_config["parser"]) # no matches, should use default parser matched_config = lcm.get_config({"unit": "bar", "container": "bar"}) self.assertEqual("journald", matched_config["parser"]) def test_unit_regex_and_globs_both_defined(self): self._write_file_with_separator_conversion( """ { api_key: "hi", journald_logs: [ { journald_globs: { "baz": "test*test", "container": "f?obar" }, parser: "TestParser", journald_unit: "scalyr" } ] } """ ) config = self.get_configuration() config.parse() # self.assertRaises( BadMonitorConfiguration, self.assertRaises(BadConfiguration, lambda: LogConfigManager(config, None)) def test_big_config(self): self._write_file_with_separator_conversion( """ { api_key: "hi", journald_logs: [ { journald_unit: "test", parser: "TestParser", redaction_rules: [ { match_expression: "a", replacement: "yes" } ], sampling_rules: [ { match_expression: "INFO", sampling_rate: 0.1} ], attributes: { webServer: "true" } } ] } """ ) config = self.get_configuration() config.parse() lcm = LogConfigManager(config, None) 
matched_config = lcm.get_config("test") # NOTE: We need to sort the values since we can't rely on dict ordering expected = [{"match_expression": "a", "replacement": "yes"}] expected[0] = sorted(expected[0].items()) actual = list(matched_config["redaction_rules"]) actual[0] = sorted(actual[0].items()) self.assertEqual(expected, actual) expected = [{"match_expression": "INFO", "sampling_rate": 0.1}] expected[0] = sorted(expected[0].items()) actual = list(matched_config["sampling_rules"]) actual[0] = sorted(actual[0].items()) self.assertEqual(expected, actual) self.assertEqual("true", matched_config["attributes"]["webServer"]) def test_default_logger(self): self._write_file_with_separator_conversion( """ { api_key: "hi", journald_logs: [ ] } """ ) config = self.get_configuration() config.parse() lcm = LogConfigManager(config, JournaldLogFormatter()) lcm.set_log_watcher(FakeLogWatcher()) logger = lcm.get_logger("test") logger.info("Find this string") expected_path = os.path.join( self._log_dir, "journald_monitor.log", ) with open(expected_path) as f: self.assertTrue("Find this string" in f.read()) def test_modified_default_logger(self): self._write_file_with_separator_conversion( """ { api_key: "hi", journald_logs: [ { journald_unit: ".*", parser: "TestParser" } ] } """ ) config = self.get_configuration() config.parse() lcm = LogConfigManager(config, JournaldLogFormatter()) lcm.set_log_watcher(FakeLogWatcher()) logger = lcm.get_logger("test") logger.info("Find this string") expected_path = os.path.join( self._log_dir, "journald_monitor.log", ) with open(expected_path) as f: self.assertTrue("Find this string" in f.read()) def test_specific_logger(self): self._write_file_with_separator_conversion( """ { api_key: "hi", journald_logs: [ { journald_unit: "TEST", parser: "TestParser" } ] } """ ) config = self.get_configuration() config.parse() lcm = LogConfigManager(config, JournaldLogFormatter()) lcm.set_log_watcher(FakeLogWatcher()) logger = lcm.get_logger("TEST") logger.info("Find this string") logger2 = lcm.get_logger("Other") logger2.info("Other thing") expected_path = os.path.join( self._log_dir, "journald_" + six.text_type(hash("TEST")) + ".log", ) with open(expected_path) as f: self.assertTrue("Find this string" in f.read()) expected_path = os.path.join( self._log_dir, "journald_monitor.log", ) with open(expected_path) as f: self.assertTrue("Other thing" in f.read()) def test_regex_logger(self): self._write_file_with_separator_conversion( """ { api_key: "hi", journald_logs: [ { journald_unit: "test.*test", parser: "TestParser" } ] } """ ) config = self.get_configuration() config.parse() lcm = LogConfigManager(config, JournaldLogFormatter()) lcm.set_log_watcher(FakeLogWatcher()) logger = lcm.get_logger("testestestestestest") logger.info("Find this string") logger2 = lcm.get_logger("Other") logger2.info("Other thing") expected_path = os.path.join( self._log_dir, "journald_" + six.text_type(hash("test.*test")) + ".log", ) with open(expected_path) as f: self.assertTrue("Find this string" in f.read()) expected_path = os.path.join( self._log_dir, "journald_monitor.log", ) with open(expected_path) as f: self.assertTrue("Other thing" in f.read()) def test__verify_or_set_optional_int_with_min_and_max_value(self): config = self._create_test_configuration_instance() # 1. 
Valid value1 config_object = JsonObject(content={"foo": 10}) config._Configuration__verify_or_set_optional_int( config_object=config_object, field="foo", default_value=None, config_description=None, min_value=10, max_value=100, ) self.assertEqual(config_object["foo"], 10) config_object = JsonObject(content={"foo": 50}) config._Configuration__verify_or_set_optional_int( config_object=config_object, field="foo", default_value=None, config_description=None, min_value=10, max_value=100, ) self.assertEqual(config_object["foo"],
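The journald_globs tests above encode a simple rule: a log config applies only when every glob matches the corresponding field of the journald entry, otherwise the default journald parser is used. A minimal sketch of that matching rule using only the standard library; it illustrates the behaviour under test, not the agent's actual LogConfigManager implementation.

from fnmatch import fnmatchcase

def config_matches(globs: dict, fields: dict) -> bool:
    """Return True if every glob key is present in fields and matches its value."""
    return all(
        key in fields and fnmatchcase(fields[key], pattern)
        for key, pattern in globs.items()
    )

globs = {"unit": "test*test", "container": "f?obar"}
print(config_matches(globs, {"unit": "testtest", "container": "frobar"}))  # True
print(config_matches(globs, {"unit": "testtest", "container": "foobaz"}))  # False -> default parser
print(config_matches(globs, {"unit": "test_other_test"}))                  # False, "container" missing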
verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `proxy_put_namespaced_pod`") resource_path = '/api/v1/proxy/namespaces/{namespace}/pods/{name}'.replace('{format}', 'json') method = 'PUT' path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] if 'name' in params: path_params['name'] = params['name'] query_params = {} header_params = {} form_params = {} files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['*/*']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = [] response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params, body=body_params, post_params=form_params, files=files, response_type='str', auth_settings=auth_settings, callback=params.get('callback')) return response def proxy_post_namespaced_pod(self, namespace, name, **kwargs): """ proxy POST requests to Pod This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.proxy_post_namespaced_pod(namespace, name, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str name: name of the Pod (required) :return: str If the method is called asynchronously, returns the request thread. 
""" all_params = ['namespace', 'name'] all_params.append('callback') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method proxy_post_namespaced_pod" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `proxy_post_namespaced_pod`") # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `proxy_post_namespaced_pod`") resource_path = '/api/v1/proxy/namespaces/{namespace}/pods/{name}'.replace('{format}', 'json') method = 'POST' path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] if 'name' in params: path_params['name'] = params['name'] query_params = {} header_params = {} form_params = {} files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['*/*']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = [] response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params, body=body_params, post_params=form_params, files=files, response_type='str', auth_settings=auth_settings, callback=params.get('callback')) return response def proxy_delete_namespaced_pod(self, namespace, name, **kwargs): """ proxy DELETE requests to Pod This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.proxy_delete_namespaced_pod(namespace, name, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str name: name of the Pod (required) :return: str If the method is called asynchronously, returns the request thread. 
""" all_params = ['namespace', 'name'] all_params.append('callback') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method proxy_delete_namespaced_pod" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `proxy_delete_namespaced_pod`") # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `proxy_delete_namespaced_pod`") resource_path = '/api/v1/proxy/namespaces/{namespace}/pods/{name}'.replace('{format}', 'json') method = 'DELETE' path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] if 'name' in params: path_params['name'] = params['name'] query_params = {} header_params = {} form_params = {} files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['*/*']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = [] response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params, body=body_params, post_params=form_params, files=files, response_type='str', auth_settings=auth_settings, callback=params.get('callback')) return response def proxy_options_namespaced_pod(self, namespace, name, **kwargs): """ proxy OPTIONS requests to Pod This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.proxy_options_namespaced_pod(namespace, name, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str name: name of the Pod (required) :return: str If the method is called asynchronously, returns the request thread. 
""" all_params = ['namespace', 'name'] all_params.append('callback') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method proxy_options_namespaced_pod" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `proxy_options_namespaced_pod`") # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `proxy_options_namespaced_pod`") resource_path = '/api/v1/proxy/namespaces/{namespace}/pods/{name}'.replace('{format}', 'json') method = 'OPTIONS' path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] if 'name' in params: path_params['name'] = params['name'] query_params = {} header_params = {} form_params = {} files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['*/*']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = [] response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params, body=body_params, post_params=form_params, files=files, response_type='str', auth_settings=auth_settings, callback=params.get('callback')) return response def proxy_get_namespaced_pod_7(self, namespace, name, path, **kwargs): """ proxy GET requests to Pod This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.proxy_get_namespaced_pod_7(namespace, name, path, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str name: name of the Pod (required) :param str path: path to the resource (required) :return: str If the method is called asynchronously, returns the request thread. 
""" all_params = ['namespace', 'name', 'path'] all_params.append('callback') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method proxy_get_namespaced_pod_7" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `proxy_get_namespaced_pod_7`") # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `proxy_get_namespaced_pod_7`") # verify the required parameter 'path' is set if ('path' not in params) or (params['path'] is None): raise ValueError("Missing the required parameter `path` when calling `proxy_get_namespaced_pod_7`") resource_path = '/api/v1/proxy/namespaces/{namespace}/pods/{name}/{path}'.replace('{format}', 'json') method = 'GET' path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] if 'name' in params: path_params['name'] = params['name'] if 'path' in params: path_params['path'] = params['path'] query_params = {} header_params = {} form_params = {} files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['*/*']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = [] response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params, body=body_params, post_params=form_params, files=files, response_type='str', auth_settings=auth_settings, callback=params.get('callback')) return response def proxy_head_namespaced_pod_8(self, namespace, name, path, **kwargs): """ proxy HEAD requests to Pod This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.proxy_head_namespaced_pod_8(namespace, name, path, callback=callback_function) :param callback function: The callback
import pandas as pd import numpy as np from collections import defaultdict from sklearn.preprocessing import scale from sklearn.decomposition import PCA from sklearn.cluster import KMeans from sklearn.mixture import GaussianMixture, BayesianGaussianMixture from sklearn import metrics import hdbscan from scipy.cluster import hierarchy from fastcluster import linkage from fancyimpute import KNN import matplotlib.pyplot as plt import seaborn as sns # %matplotlib inline # plt.style.use('seaborn-white') class Preprocessing: def __init__(self, csv_path, varlist=None, verbose=False): ''' path -- the string of the csv file representing our raw dataset varlist -- the list of strings ''' # import the csv dataset as a pandas DataFrame self.df = pd.read_csv(csv_path) # change index (row labels) self.df = self.df.set_index('Country Code', verify_integrity=True) # only keep the variables(columns) selected by user if varlist: varlist = ['Country Name'] + varlist self.df = self.df[varlist] # convert all columns but Country Names to numeric type self.df.iloc[:, 1:] = \ self.df.iloc[:, 1:].apply(pd.to_numeric, errors='coerce') # report poor features and selected_countries if verbose: feature_miss = self.df.isnull().sum() country_miss = self.df.isnull().sum(axis=1) feature_miss = \ feature_miss[feature_miss != 0].sort_values(ascending=False) country_miss = \ country_miss[country_miss != 0].sort_values(ascending=False) print('MISSING VALUES FOR EACH FEATURE:') print(feature_miss, '\n') print('MISSING VALUES FOR EACH COUNTRY:') print(country_miss) # def drop_poor_columns(self, p): # ''' Drop the columns of self.df with more than p (%) missing values''' # # # create df with a the count of missing values for each column # missing_df = pd.DataFrame(self.df.isnull().sum()) # # extract the names of columns with more than p (%) missing values # poor_columns = missing_df.loc[missing_df[0] > p*len(self.df)].index # # drop sparse columns # self.df.drop(poor_columns, axis=1, inplace=True) # return self.df, poor_columns def dropPoorFeatures(self, axis, p): ''' Drop the rows/columns of self.df with more than p (%) missing values axis -- indicate whether to drop rows (axis=0) or columns(axis=1) ''' # create df with the count of missing values for each row/column missing_df = pd.DataFrame(self.df.isnull().sum(axis=int(not axis))) # extract the names of rows/columns with more than p (%) missing values if axis == 0: length = len(self.df.columns) else: length = len(self.df) poor_features = missing_df.loc[missing_df[0] > p*length].index # drop sparse rows/columns self.df.drop(poor_features, axis=axis, inplace=True) return self.df, poor_features def imputeKNN(self): # df is my data frame with the missings. I keep only floats self.country_names = self.df['Country Name'].values df_numeric = self.df.select_dtypes(include=[np.float64]).values # impute missing values df_filled_KNN = pd.DataFrame( KNN(k=2, verbose=False).complete(df_numeric)) df_filled_KNN.insert( loc=0, column='Country Names', value=self.country_names) df_filled_KNN.columns = self.df.columns df_filled_KNN.index = self.df.index return df_filled_KNN def exportCSV(self, path, impute=False): if not impute: # export the cleaned dataframe to a csv file self.df.to_csv(path) else: # impute the missing values before exporting to csv self.df_filled_KNN = self.imputeKNN() self.df_filled_KNN.to_csv(path) def heatmap(df, links): ''' Plot a matrix dataset as a hierarchically-clustered heatmap, using given linkages. 
''' cmap = sns.cubehelix_palette( as_cmap=True, start=.5, rot=-.75, light=.9) sns.clustermap( data=df, row_linkage=links, col_cluster=False, cmap=cmap) class Clustering: def __init__(self, csv_path, verbose=False): self.df = pd.read_csv(csv_path) # change index (row labels) self.df = self.df.set_index('Country Code', verify_integrity=True) # df.info(verbose=False) # store country full names (for plots) before removing the feature self.country_names = self.df['Country Name'].values self.df = self.df.drop(['Country Name'], axis=1) # scale the dataset to be distributed as a standard Gaussian cols = self.df.columns ind = self.df.index self.df = pd.DataFrame(scale(self.df)) self.df.columns = cols self.df.index = ind # create disctionary of clusters self.clusterings = defaultdict(lambda: np.array(0)) self.clusterings_labels = defaultdict(lambda: np.array(0)) # print general info if verbose: print('The imported dataset as the following characteristics:') print(self.df.info(verbose=False)) def getPC(self): ''' Calculate the principal components (PC) and create a new DataFrame by projecting the datapoints on the PC space. ''' self.pca = PCA() self.pca_loadings = pd.DataFrame( PCA().fit(self.df).components_.T, index=self.df.columns) self.df_pc = pd.DataFrame( self.pca.fit_transform(self.df), index=self.df.index) # plot the cumulated proportion of variance explained by the PC print('CUMULATIVE PROPORTION OF VARIANCE EXPLAINED BY PCs') plt.figure(figsize=(7, 5)) plt.plot(range(1, len(self.pca.components_)+1), self.pca.explained_variance_ratio_, '-o', label='Individual component') plt.plot(range(1, len(self.pca.components_)+1), np.cumsum(self.pca.explained_variance_ratio_), '-s', label='Cumulative') plt.ylabel('Proportion of Variance Explained') plt.xlabel('Principal Component') plt.xlim(0.75, 4.25) plt.ylim(0, 1.05) plt.xticks(range(1, len(self.pca.components_)+1)) plt.legend(loc=2) def plotAlongPC(self, pc1=0, pc2=1, xlim=[-5, 5], ylim=[-5, 5], loadings=True, clustering=None): ''' Plot the countries along the two principal components given in input: pc1[int] (usually = 0, indicating the first PC) and pc2[int] ''' fig, ax1 = plt.subplots(figsize=(9, 7)) ax1.set_xlim(xlim[0], xlim[1]) ax1.set_ylim(ylim[0], ylim[1]) if clustering is not None: # build a generator of colors NUM_COLORS = len(self.clusterings[clustering]) clist = np.random.uniform(low=0, high=1, size=(NUM_COLORS, 4)) # plot countries along PCs coloring them according to their cluster labels = self.clusterings_labels[clustering] for i, country in enumerate(self.df_pc.index): ax1.annotate(country, (self.df_pc[pc1].loc[country], -self.df_pc[pc2].loc[country]), ha='center', color=clist[labels[i]], fontweight='bold') else: # plot countries along PCs for i in self.df_pc.index: ax1.annotate(i, (self.df_pc[pc1].loc[i], -self.df_pc[pc2].loc[i]), ha='center', color='b', fontweight='bold') # Plot reference lines ax1.hlines(0, -5, 5, linestyles='dotted', colors='grey') ax1.vlines(0, -5, 5, linestyles='dotted', colors='grey') pc1_string = 'Principal Component ' + str(pc1) pc2_string = 'Principal Component ' + str(pc2) ax1.set_xlabel(pc1_string) ax1.set_ylabel(pc2_string) if loadings: # Plot Principal Component loading vectors, using a second y-axis. ax2 = ax1.twinx().twiny() ax2.set_ylim(-1, 1) ax2.set_xlim(-1, 1) ax2.tick_params(axis='y', colors='orange') # ax2.set_xlabel('Principal Component loading vectors', # color='orange') # Plot labels for vectors. # 'a' is an offset parameter to separate arrow tip and text. 
a = 1.07 for i in self.pca_loadings[[pc1, pc2]].index: ax2.annotate(i, (self.pca_loadings[pc1].loc[i]*a, -self.pca_loadings[pc2].loc[i]*a), color='orange') # Plot vectors for k in range(len(self.pca_loadings.columns)): ax2.arrow(0, 0, self.pca_loadings[pc1][k], -self.pca_loadings[pc2][k], width=0.002, color='black') return def plotDendrogram(self, links, threshold, metric, method): plt.figure(figsize=(15, 9)) den_title = 'METHOD: ' + str(method) + ' METRIC: ' + str(metric) plt.title(den_title) den = hierarchy.dendrogram(links, orientation='right', labels=self.country_names, color_threshold=threshold, leaf_font_size=10) plt.vlines(threshold, 0, plt.gca().yaxis.get_data_interval()[1], colors='r', linestyles='dashed') return den def clustersTable(self, clustering): ''' Clustering is an array of cluster labels, one for each country ''' lis = sorted( list(zip(clustering, self.country_names)), key=lambda x: x[0]) groups = set(map(lambda x: x[0], lis)) table = pd.DataFrame(list( zip(groups, [[y[1] for y in lis if y[0] == x] for x in groups]))) table.columns = ['Cluster', ''] table.set_index('Cluster', inplace=True, verify_integrity=False) return table def saveClustering(self, cluster_labels, clustering_name): # save clusterings into a dict and rename its columns self.clusterings[clustering_name] = \ self.clustersTable(cluster_labels) self.clusterings[clustering_name].columns = [clustering_name] self.clusterings_labels[clustering_name] = cluster_labels def hierarchicalClustering( self, metric, method, threshold=None, on_PC=0, heatmap=False): ''' Show figures of clusters retrieved through the hierachical method and return an array with the cluster index of each country. metric -- [str] used for assigning distances to data: 'euclidean', 'ćorrelation', 'cosine', 'seuclidean'... method -- [str] the type of linkage used for agglomerating the nodes 'average','complete','ward'...(check fastcluster full list) threshold -- [int] threshold distance for separing clusters, in the hierachical tree. on_PC -- [int] apply clustering by using data projections on the first on_PC principal components ''' if on_PC > 0: df = self.df_pc.iloc[:, :on_PC+1] else: df = self.df if method == 'all': method = ['average', 'complete', 'single', 'weighted', 'centroid', # only for Euclidean data 'median', # only for Euclidean data 'ward', # only for Euclidean data ] elif type(method) != list: method = list([method]) metric = str(metric) for met in method: # set up the linking tool links = linkage(df, metric=metric, method=met) self.link = links # plot dendrogram self.plotDendrogram(links, threshold, metric, met) if heatmap: heatmap(df, links) labels = hierarchy.fcluster(links, threshold, criterion='distance') # save clusters self.saveClustering( labels, 'hc_'+str(met)+'_'+str(metric)+'_'+str(threshold)) # self.hierarchical_classes = get_hierarchical_classes(den) # plt.savefig('tree2.png') def hdbscan(self, min_cluster_size=2, on_PC=0): '''compute clusters using HDBSCAN algorithm''' if on_PC > 0: df = self.df_pc.iloc[:, :on_PC+1] else: df = self.df clusterer = hdbscan.HDBSCAN(min_cluster_size=min_cluster_size) clusterer.fit_predict(df) # save clusters self.saveClustering(clusterer.labels_, 'hdbscan') def bayesianGaussianMixture(self, n_components, covariance_type='full', n_init=50, on_PC=0): ''' Compute Bayesian Gaussian Mixture clustering. Note: in this case, the number of components effectively used can be < n_componentss (at most, n_components). 
''' if on_PC > 0: df = self.df_pc.iloc[:, :on_PC+1] else: df = self.df clusterer = BayesianGaussianMixture(n_components, covariance_type=covariance_type, n_init=n_init) labels = clusterer.fit(df).predict(df) # save clusters self.saveClustering(labels, 'bayesian gm' + str(n_components)) def gaussianMixture(self, n_components, covariance_type='full', n_init=50, on_PC=0): '''compute Gaussian Mixture clustering''' if on_PC > 0: df = self.df_pc.iloc[:, :on_PC+1] else: df = self.df clusterer = GaussianMixture(n_components, covariance_type=covariance_type, n_init=n_init) labels = clusterer.fit(df).predict(df) # save clusters self.saveClustering(labels, 'gm' + str(n_components)) def gmBIC(self, n_min, n_max, covariance_type='full', n_init=50, on_PC=0): if on_PC > 0: df = self.df_pc.iloc[:, :on_PC+1] else: df = self.df '''compute Bayesian Information Criterion''' n_components = np.arange(n_min, n_max) models = [ GaussianMixture(n, covariance_type=covariance_type, n_init=n_init) for n in n_components] bics = [model.fit(df).bic(df) for model in models] bics = np.array(bics) # store the optimal number of gaussian components and the resulting
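# A usage sketch for the Preprocessing/Clustering pipeline defined above. The CSV
# paths and parameter values are placeholders; the method names and signatures are
# the ones defined in this file.
prep = Preprocessing('wdi_raw.csv', verbose=True)
prep.dropPoorFeatures(axis=1, p=0.3)          # drop features with >30% missing values
prep.dropPoorFeatures(axis=0, p=0.3)          # drop countries with >30% missing values
prep.exportCSV('wdi_clean.csv', impute=True)  # KNN-impute remaining gaps, then save

clus = Clustering('wdi_clean.csv', verbose=True)
clus.getPC()                                  # PCA projection and scree plot
clus.hierarchicalClustering(metric='euclidean', method='ward', threshold=10, on_PC=2)
clus.gmBIC(2, 10, on_PC=2)                    # pick the number of Gaussian components by BIC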
+ log(0.0001 + m.x33))*m.x33 + (0.00380676639045951 + log(0.0001 + m.x34))*m.x34 + (-4.17602428751368e-5 + log(0.0001 + m.x35))*m.x35 + (-9.99950003332973e-5 + log( 0.0001 + m.x36))*m.x36 + (7.31660988298921e-5 + log(0.0001 + m.x37))*m.x37 + ( 0.000352695405055885 + log(0.0001 + m.x38))*m.x38 + (0.00241633549088715 + log(0.0001 + m.x39))* m.x39 + (0.00899927846319621 + log(0.0001 + m.x40))*m.x40 + (-9.99950003332973e-5 + log(0.0001 + m.x41))*m.x41 + (0.0110427672968248 + log(0.0001 + m.x42))*m.x42 + (0.0585747865999338 + log( 0.0001 + m.x43))*m.x43 + (2.46797152845802 + log(0.0001 + m.x44))*m.x44 + (2.71882080946593 + log(0.0001 + m.x45))*m.x45 + (1.7476473253307 + log(0.0001 + m.x46))*m.x46 + (0.492166514580875 + log(0.0001 + m.x47))*m.x47 + (8.51152888064576 + log(0.0001 + m.x48))*m.x48 + ( 8.39857726588301 + log(0.0001 + m.x49))*m.x49 + (2.24200380048815 + log(0.0001 + m.x50))*m.x50 + (7.04445194421227 + log(0.0001 + m.x51))*m.x51 + (2.61388982017163 + log(0.0001 + m.x52))*m.x52 + (0.492922058386827 + log(0.0001 + m.x53))*m.x53 + (5.94265253093074 + log(0.0001 + m.x54))* m.x54 + (8.02737875283731 + log(0.0001 + m.x55))*m.x55 + (8.80346881150627 + log(0.0001 + m.x56)) *m.x56 + (8.31864293053978 + log(0.0001 + m.x57))*m.x57 + (8.02740276388566 + log(0.0001 + m.x58) )*m.x58 + (7.96040519686369 + log(0.0001 + m.x59))*m.x59 + (8.07151007717336 + log(0.0001 + m.x60 ))*m.x60 + (8.20161484116001 + log(0.0001 + m.x61))*m.x61 + (8.17975180483618 + log(0.0001 + m.x62))*m.x62 + (8.54978200943058 + log(0.0001 + m.x63))*m.x63 + (8.32979457692237 + log(0.0001 + m.x64))*m.x64 + (8.30174003930177 + log(0.0001 + m.x65))*m.x65 + (8.40175999237212 + log( 0.0001 + m.x66))*m.x66 + (8.63430012420156 + log(0.0001 + m.x67))*m.x67 + (8.84142632943575 + log(0.0001 + m.x68))*m.x68 + (3.49868286907897 + log(0.0001 + m.x69))*m.x69 + (5.98155369637624 + log(0.0001 + m.x70))*m.x70 + (0.897805493423784 + log(0.0001 + m.x71))*m.x71 + ( 5.84282806624924 + log(0.0001 + m.x72))*m.x72 + (7.15163930988535 + log(0.0001 + m.x73))*m.x73 + (8.33485869852893 + log(0.0001 + m.x74))*m.x74 + (7.10243773878911 + log(0.0001 + m.x75))*m.x75 + (8.18533639606999 + log(0.0001 + m.x76))*m.x76 + (7.11675459599093 + log(0.0001 + m.x77))* m.x77 + (8.36947775561834 + log(0.0001 + m.x78))*m.x78 + (8.31422349259207 + log(0.0001 + m.x79)) *m.x79 + (8.40549380688731 + log(0.0001 + m.x80))*m.x80 + (8.50997918428044 + log(0.0001 + m.x81) )*m.x81 + (8.49260998103509 + log(0.0001 + m.x82))*m.x82 + (8.77532469142121 + log(0.0001 + m.x83 ))*m.x83 + (8.61019907118565 + log(0.0001 + m.x84))*m.x84 + (8.58850517112631 + log(0.0001 + m.x85))*m.x85 + (8.6652073686555 + log(0.0001 + m.x86))*m.x86 + (8.83628073911711 + log(0.0001 + m.x87))*m.x87 + (8.97937592054743 + log(0.0001 + m.x88))*m.x88 + (2.46852338503202 + log(0.0001 + m.x89))*m.x89 + (1.91720477903018 + log(0.0001 + m.x90))*m.x90 + (0.60963506730143 + log( 0.0001 + m.x91))*m.x91 + (7.38303503552861 + log(0.0001 + m.x92))*m.x92 + (6.61385500454867 + log(0.0001 + m.x93))*m.x93 + (1.56112005206572 + log(0.0001 + m.x94))*m.x94 + (1.67212033596968 + log(0.0001 + m.x95))*m.x95 + (8.71024303823382 + log(0.0001 + m.x96))*m.x96 + ( 8.04629045267352 + log(0.0001 + m.x97))*m.x97 + (3.05689092938026 + log(0.0001 + m.x98))*m.x98 + (2.77627317179529 + log(0.0001 + m.x99))*m.x99 + (2.65781234961273 + log(0.0001 + m.x100))*m.x100 + (2.76157378758184 + log(0.0001 + m.x101))*m.x101 + (2.94451426260394 + log(0.0001 + m.x102))* m.x102 + (2.9103779378897 + log(0.0001 + m.x103))*m.x103 + (3.56424783498832 + log(0.0001 + 
m.x104))*m.x104 + (3.15400406049806 + log(0.0001 + m.x105))*m.x105 + (3.15563321478178 + log( 0.0001 + m.x106))*m.x106 + (3.25368032355531 + log(0.0001 + m.x107))*m.x107 + (3.56729155553715 + log(0.0001 + m.x108))*m.x108 + (3.89727368454517 + log(0.0001 + m.x109))*m.x109 + ( 7.84277533552815 + log(0.0001 + m.x110))*m.x110 + (2.19913338431261 + log(0.0001 + m.x111))* m.x111 + (3.90768611099482 + log(0.0001 + m.x112))*m.x112 + (3.33509403927957 + log(0.0001 + m.x113))*m.x113 + (2.0670555036041 + log(0.0001 + m.x114))*m.x114 + (8.01043548723776 + log( 0.0001 + m.x115))*m.x115 + (8.23700408370598 + log(0.0001 + m.x116))*m.x116 + (7.48899828481342 + log(0.0001 + m.x117))*m.x117 + (6.69277917551656 + log(0.0001 + m.x118))*m.x118 + ( 6.00440056991849 + log(0.0001 + m.x119))*m.x119 + (2.33422060199767 + log(0.0001 + m.x120))* m.x120 + (4.38202389120357 + log(0.0001 + m.x121))*m.x121 + (3.93196564798733 + log(0.0001 + m.x122))*m.x122 + (3.83720979620516 + log(0.0001 + m.x123))*m.x123 + (3.9958542346516 + log( 0.0001 + m.x124))*m.x124 + (4.19245563097434 + log(0.0001 + m.x125))*m.x125 + (4.1584779699594 + log(0.0001 + m.x126))*m.x126 + (4.80813930163156 + log(0.0001 + m.x127))*m.x127 + ( 4.40084433175423 + log(0.0001 + m.x128))*m.x128 + (4.35376754229126 + log(0.0001 + m.x129))* m.x129 + (4.52599814103411 + log(0.0001 + m.x130))*m.x130 + (4.98916919739898 + log(0.0001 + m.x131))*m.x131 + (5.53557762000157 + log(0.0001 + m.x132))*m.x132 + (6.15602660370378 + log( 0.0001 + m.x133))*m.x133 + (2.11210428885156 + log(0.0001 + m.x134))*m.x134 + (2.45661908041142 + log(0.0001 + m.x135))*m.x135 + (2.00013214016287 + log(0.0001 + m.x136))*m.x136 + ( 2.31679227603326 + log(0.0001 + m.x137))*m.x137 + (3.36162416865471 + log(0.0001 + m.x138))* m.x138 + (3.04201965723089 + log(0.0001 + m.x139))*m.x139 + (5.73445833708332 + log(0.0001 + m.x140))*m.x140 + (8.90114635113409 + log(0.0001 + m.x141))*m.x141 + (5.54218905373136 + log( 0.0001 + m.x142))*m.x142 + (0.687036263468994 + log(0.0001 + m.x143))*m.x143 + (3.72420872020866 + log(0.0001 + m.x144))*m.x144 + (6.83909807250432 + log(0.0001 + m.x145))*m.x145 + ( 5.82289074941966 + log(0.0001 + m.x146))*m.x146 + (2.26462607492883 + log(0.0001 + m.x147))* m.x147 + (4.5798321084754 + log(0.0001 + m.x148))*m.x148 + (4.16426782525903 + log(0.0001 + m.x149))*m.x149 + (4.05113746927093 + log(0.0001 + m.x150))*m.x150 + (3.66741575890297 + log( 0.0001 + m.x151))*m.x151 + (3.87789152471349 + log(0.0001 + m.x152))*m.x152 + (3.843853793511 + log(0.0001 + m.x153))*m.x153 + (4.49510648673207 + log(0.0001 + m.x154))*m.x154 + ( 4.08669693847842 + log(0.0001 + m.x155))*m.x155 + (3.74622639683532 + log(0.0001 + m.x156))* m.x156 + (3.54203680144453 + log(0.0001 + m.x157))*m.x157 + (3.59364527803368 + log(0.0001 + m.x158))*m.x158 + (3.46826364567878 + log(0.0001 + m.x159))*m.x159 + (7.31670928750062 + log( 0.0001 + m.x160))*m.x160 + (2.44916368424722 + log(0.0001 + m.x161))*m.x161 + (7.29277637325645 + log(0.0001 + m.x162))*m.x162 + (5.90941532502158 + log(0.0001 + m.x163))*m.x163 + ( 2.56132843435225 + log(0.0001 + m.x164))*m.x164 + (5.4180982651191 + log(0.0001 + m.x165))*m.x165 + (5.00693727762858 + log(0.0001 + m.x166))*m.x166 + (4.89472352365685 + log(0.0001 + m.x167))* m.x167 + (4.5134446079815 + log(0.0001 + m.x168))*m.x168 + (4.72269615886495 + log(0.0001 + m.x169))*m.x169 + (4.68887416726387 + log(0.0001 + m.x170))*m.x170 + (5.33442285471652 + log( 0.0001 + m.x171))*m.x171 + (4.93000588885534 + log(0.0001 + m.x172))*m.x172 + (4.5918265656241 + log(0.0001 + m.x173))*m.x173 + 
(4.38868200858199 + log(0.0001 + m.x174))*m.x174 + ( 4.44004605492317 + log(0.0001 + m.x175))*m.x175 + (4.31523716019784 + log(0.0001 + m.x176))* m.x176 + (5.13783653504752 + log(0.0001 + m.x177))*m.x177 + (3.11448969600567 + log(0.0001 + m.x178))*m.x178 + (2.22623931490397 + log(0.0001 + m.x179))*m.x179 + (6.95485918222627 + log( 0.0001 + m.x180))*m.x180 + (2.58156120424789 + log(0.0001 + m.x181))*m.x181 + (3.32112048693262 + log(0.0001 + m.x182))*m.x182 + (2.91102603690908 + log(0.0001 + m.x183))*m.x183 + ( 2.74378869519635 + log(0.0001 + m.x184))*m.x184 + (2.80981617985678 + log(0.0001 + m.x185))* m.x185 + (2.88815630249741 + log(0.0001 + m.x186))*m.x186 + (2.8540164766265 + log(0.0001 + m.x187))*m.x187 + (3.50797943462438 + log(0.0001 + m.x188))*m.x188 + (3.09767041342839 + log( 0.0001 + m.x189))*m.x189 + (3.03194913525907 + log(0.0001 + m.x190))*m.x190 + (3.16129628255377 + log(0.0001 + m.x191))*m.x191 + (3.36148541087378 + log(0.0001 + m.x192))*m.x192 + ( 3.56105733179929 + log(0.0001 + m.x193))*m.x193 + (2.96929525808926 + log(0.0001 + m.x194))* m.x194 + (8.05509985681069 + log(0.0001 + m.x195))*m.x195 + (4.62306761413426 + log(0.0001 + m.x196))*m.x196 + (6.63126951518314 + log(0.0001 + m.x197))*m.x197 + (5.29591913142778 + log( 0.0001 + m.x198))*m.x198 + (5.82345626456382 + log(0.0001 + m.x199))*m.x199 + (8.20285185414306 + log(0.0001 + m.x200))*m.x200 + (7.21550827235487 + log(0.0001 + m.x201))*m.x201 + ( 4.68180505779346 + log(0.0001 + m.x202))*m.x202 + (2.93695025457178 + log(0.0001 + m.x203))* m.x203 + (3.97734762963171 + log(0.0001 + m.x204))*m.x204 + (2.14049727736834 + log(0.0001 + m.x205))*m.x205 + (8.23177823570219 + log(0.0001 + m.x206))*m.x206 + (6.52679204672164 + log( 0.0001 + m.x207))*m.x207 + (4.08065078408676 + log(0.0001 + m.x208))*m.x208 + (3.18246487127354 + log(0.0001 + m.x209))*m.x209 + (1.89357096896066 + log(0.0001 + m.x210))*m.x210 + ( 4.31395709114783 + log(0.0001 + m.x211))*m.x211 + (3.07443196583763 + log(0.0001 + m.x212))* m.x212 + (2.93161031555576 + log(0.0001 + m.x213))*m.x213 + (3.00709878379221 + log(0.0001 + m.x214))*m.x214 + (3.3183666475389 + log(0.0001 + m.x215))*m.x215 + (3.14129889837036 + log( 0.0001 + m.x216))*m.x216 + (3.1071764688144 + log(0.0001 + m.x217))*m.x217 + (3.76067711414489 + log(0.0001 + m.x218))*m.x218 + (3.35069221096413 + log(0.0001 + m.x219))*m.x219 + ( 3.27975274298894 + log(0.0001 + m.x220))*m.x220 + (3.66214409354723 + log(0.0001 + m.x221))* m.x221 + (4.25595537649539 + log(0.0001 + m.x222))*m.x222 + (5.24752386937103 + log(0.0001 + m.x223))*m.x223 + (3.22794506413392 + log(0.0001 + m.x224))*m.x224 + (2.55435372044492 + log( 0.0001 + m.x225))*m.x225 + (5.06824206594274 + log(0.0001 + m.x226))*m.x226 + (9.05430175980402 + log(0.0001 + m.x227))*m.x227 + (7.62027102879143 + log(0.0001 + m.x228))*m.x228 + ( 3.46697792865685 + log(0.0001 + m.x229))*m.x229 + (0.604423267719963 + log(0.0001 + m.x230))* m.x230 + (0.897229137135356 + log(0.0001 +
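# The block above looks like part of a machine-generated nonlinear objective whose
# terms all share the shape (c_i + log(0.0001 + x_i)) * x_i. The sketch below shows
# how such an objective could be built programmatically instead of term by term;
# treating the model as a Pyomo model is an assumption, and the coefficients here
# are placeholders rather than the values from the generated file.
from pyomo.environ import (ConcreteModel, Var, Objective, NonNegativeReals,
                           log, minimize)

coeffs = {33: 0.0038, 34: -4.2e-5, 35: -1.0e-4}   # hypothetical subset of c_i
m = ConcreteModel()
m.x = Var(list(coeffs), domain=NonNegativeReals)

# One term per variable, mirroring the pattern in the dump above.
m.obj = Objective(
    expr=sum((c + log(0.0001 + m.x[i])) * m.x[i] for i, c in coeffs.items()),
    sense=minimize)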
party)) or (any(ally.stress > 0 for ally in party) and any(ally.stressHealer for ally in party))): if len(enemies_not_dead_already) == 1: enemy = enemies_not_dead_already[0] if 'barbaric_yawp' in hero.skills and hero.barbaric_yawp_count < 3 and (hero.rank == 1 or hero.rank == 2) \ and (1 in enemy.rank or 2 in enemy.rank) and not enemy.stunned \ and stun_chance - enemy.stunResist >= 50: attack = 'barbaric_yawp' elif hero.rank != 1 and (enemy.threat < 4 or (enemy.stunned and enemy.canBeKilledIn1Hit)): attack = 'swap' # fix if party is out of position if attack is None and ((hero.rank == 2 and party[0].heroClass in BackLineClasses) or (hero.rank == 3 and party[1].heroClass not in FrontLineClasses) or (hero.rank == 4 and party[2].heroClass not in FrontLineClasses)): attack = 'swap' # stun two enemies with yawp if specific conditions are met if attack is None and 'barbaric_yawp' in hero.skills and hero.barbaric_yawp_count < 3 \ and (hero.rank == 1 or hero.rank == 2): rank1_enemy = next((enemy for enemy in enemies_not_dead_already if 1 in enemy.rank and not enemy.alreadyMoved), None) rank2_enemy = next((enemy for enemy in enemies_not_dead_already if 2 in enemy.rank and 1 not in enemy.rank and not enemy.alreadyMoved), None) if rank1_enemy is not None and rank2_enemy is not None: high_threat_enemies_in_back = [enemy for enemy in enemies_not_dead_already if enemy.threat >= 4 and 1 not in enemy.rank and 2 not in enemy.rank] if len(high_threat_enemies_in_back) == 0 and len(enemies_not_dead_already) < 4 \ and not rank1_enemy.stunned and stun_chance - rank1_enemy.stunResist >= 50 \ and not rank2_enemy.stunned and stun_chance - rank2_enemy.stunResist >= 50: attack = 'barbaric_yawp' # Find target and attack enemy if attack == 'swap': swap_hero(hero, swap_distance, UpdatedPartyOrder, debug=Debug) else: if attack is not None: list_of_attacks.insert(0, attack) find_target_and_attack(raid_info, enemy_formation, hero, party, list_of_attacks, UpdatedPartyOrder) def crusader_action(party_sorted_by_rank, hero, raid_info, enemy_formation): """[ Ideal skill load-out: smite, stunning blow, holy lance, inspiring cry ]""" global UpdatedPartyOrder party = party_sorted_by_rank list_of_attacks = ['smite', 'holy_lance'] stall_count = raid_info['battle']['round_stall_count'] stall_accelerated = raid_info['battle']['previous_stall_accelerated'] stall = False if stall_count >= 2 or stall_accelerated else True attack, stun_chance, target = None, None, None swap_distance = 1 if 'stunning_blow' in hero.skills: stun_level = hero.skills['stunning_blow'] stun_chance = AttackSkills['stunning_blow'][2][stun_level] enemies_not_dead_already = [enemy for enemy in enemy_formation if not enemy.alreadyGoingToDie] enemy = enemies_not_dead_already[0] if len(enemies_not_dead_already) > 0 else None # stall if only one weak enemy left and need to heal or stress heal if (len(enemies_not_dead_already) == 0 or (len(enemies_not_dead_already) == 1 and (enemies_not_dead_already[0].threat < 4 or enemies_not_dead_already[0].stunned) and stall)) \ and ((any(ally.percentHp < 80 for ally in party) and any(ally.healer for ally in party)) or (any(ally.stress > 0 for ally in party) and any(ally.stressHealer for ally in party))): if len(enemies_not_dead_already) == 1 \ and not (enemy.name == '<NAME>' and (3 in enemy.rank or 4 in enemy.rank)): enemy = enemies_not_dead_already[0] can_stun = stun_chance - enemy.stunResist >= 55 if 'inspiring_cry' in hero.skills \ and (any(ally.effectiveHp == 0 for ally in party) or (enemy.threat <= 2 or enemy.stunned or 
(enemy.threat < 4 and enemy.canBeKilledIn1Hit) and any(ally.stress > 0 for ally in party))): attack = 'inspiring_cry' elif (1 == hero.rank or 2 == hero.rank) and (2 in enemy.rank or 1 in enemy.rank) \ and not enemy.stunned and can_stun and not enemy.canBeKilledIn1Hit: attack = 'stunning_blow' target = enemy else: attack = 'swap' if hero.rank != 1 or (hero.rank == 1 and party[1].heroClass not in BackLineClasses) \ else None swap_distance = -1 if hero.rank == 1 and party[1].heroClass not in BackLineClasses else 1 # stress heal if main threat is dealt with, or heal if ally is on deaths door or can stop from reaching deaths door if attack is None and stall and 'inspiring_cry' in hero.skills \ and (any(ally.effectiveHp == 0 for ally in party) or (len(enemies_not_dead_already) < 3 and any(ally.stress > 0 for ally in party) and not any(enemy.threat > 2 or (enemy.threat > 3 and not enemy.stunned) for enemy in enemies_not_dead_already))): attack = 'inspiring_cry' # stun enemy if can't kill if attack is None and (hero.rank == 1 or hero.rank == 2) and 'stunning_blow' in hero.skills: if any(stun_chance - enemy.stunResist >= 55 and not enemy.stunned and not enemy.canBeKilledIn1Hit and (1 in enemy.rank or 2 in enemy.rank) for enemy in enemies_not_dead_already): attack = 'stunning_blow' elif attack is None or (hero.rank == 3 or hero.rank == 4): # holy lance if rank 3 or 4, and not front line on next rank, and enemy on rank 2 if any(2 in enemy.rank or 3 in enemy.rank or 4 in enemy.rank for enemy in enemies_not_dead_already) \ and 'holy_lance' in hero.skills and party[hero.rank-2].heroClass not in FrontLineClasses: attack = 'holy_lance' elif hero.rank == 3 and party[1] in BackLineClasses \ or (not any(ally.stress > 0 for ally in party) and party[hero.rank-2].heroClass not in FrontLineClasses): attack = 'swap' elif 'inspiring_cry' in hero.skills: attack = 'inspiring_cry' else: attack = 'swap' if attack == 'swap': swap_hero(hero, swap_distance, UpdatedPartyOrder, debug=Debug) elif attack == 'stunning_blow': find_target_and_stun(hero, enemies_not_dead_already, attack, stun_chance, UpdatedPartyOrder, target) elif attack == 'inspiring_cry': target = next((ally for ally in party if ally.currentHp == 0), None) if target is None: target = next((ally for ally in party if ally.effectiveHp == 0), None) if target is None: party.sort(key=lambda k: k.stress, reverse=True) target = party[0] heal_target(hero, target, attack, debug=Debug) else: # Find target and attack enemy if attack is not None: list_of_attacks.insert(0, attack) find_target_and_attack(raid_info, enemy_formation, hero, party, list_of_attacks, UpdatedPartyOrder) def find_target_and_attack(raid_info, enemy_formation, hero, party, list_of_attacks, party_order): global attack_completed attack_completed = False current_round = raid_info['battle']['round'] stall_count = raid_info['battle']['round_stall_count'] stall_accelerated = raid_info['battle']['previous_stall_accelerated'] stall = False if stall_count >= 2 or stall_accelerated else True enemies_not_dead_already = [enemy for enemy in enemy_formation if not enemy.alreadyGoingToDie] enemy = enemies_not_dead_already[0] if len(enemies_not_dead_already) > 0 else None # filter out skills if hero is not in the correct rank or does not have equipped viable_attacks = [attack for attack in list_of_attacks if attack in hero.skills and hero.rank in AttackSkills[attack][0]] # stall by attacking corpse if 1 enemy left if (len(enemies_not_dead_already) == 0 or (len(enemies_not_dead_already) == 1 and (stall or 
(enemy.canBeKilledIn1Hit and any(ally is not hero and not ally.already_moved and not ally.stunned and (((ally.rank == 1 or ally.rank == 2) and (ally.heroClass in FrontLineClasses)) or (ally.heroClass not in BackLineClasses and ally.rank != 4)) for ally in party))) and current_round < 8)) \ and ((any(ally.percentHp < 80 for ally in party) and any(ally.healer for ally in party)) or (any(ally.stress > 0 for ally in party) and any(ally.stressHealer for ally in party))): if len(enemies_not_dead_already) == 1: if ((enemy.threat < 4 and not (enemy.name == 'Bone Arbalist' and (3 in enemy.rank or 4 in enemy.rank)) and not enemy.isTank) or (enemy.stunned and not enemy.isTank)): targets = [enemy for enemy in enemy_formation if enemy.name == 'Large Corpse' or enemy.name == 'Corpse'] choose_best_attack(hero, targets, viable_attacks, party_order) # if enemy is a very high threat (>=7, stress attacker) and not going to die already if not attack_completed: targets = [enemy for enemy in enemy_formation if enemy.threat >= 7 and not enemy.alreadyGoingToDie] if len(targets) > 0: sort_targets(targets) choose_best_attack(hero, targets, viable_attacks, party_order) # if enemy is a high threat (>=4, high threat) and not going to die already if not attack_completed: targets = [enemy for enemy in enemy_formation if enemy.threat >= 4 and not enemy.alreadyGoingToDie] if len(targets) > 0: sort_targets(targets) choose_best_attack(hero, targets, viable_attacks, party_order) # if enemy isn't stunned, can be killed in one hit, isn't going to die from blight or bleed if not attack_completed: targets = [enemy for enemy in enemy_formation if not enemy.stunned and not enemy.alreadyGoingToDie and enemy.canBeKilledIn1Hit and enemy.threat > 1] if len(targets) > 0: sort_targets(targets) choose_best_attack(hero, targets, viable_attacks, party_order) # if corpse in rank 1 or 2 and enemy in rank 3 is skeleton archer if not attack_completed: targets = [] if any(enemy.name == 'Bone Arbalist' and not enemy.alreadyGoingToDie for enemy in enemy_formation): if len(enemy_formation) > 2 and 'Bone Arbalist' == enemy_formation[2].name: if 'Corpse' == enemy_formation[0].name or 'Large Corpse' == enemy_formation[0].name: targets.append(enemy_formation[0]) if 'Corpse' == enemy_formation[1].name or 'Large Corpse' == enemy_formation[1].name: targets.append(enemy_formation[1]) elif 'Large Corpse' == enemy_formation[0].name and 'Bone Arbalist' == enemy_formation[1].name: targets.append(enemy_formation[0])
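# The targeting code above walks an ordered series of filters (stress attackers with
# threat >= 7, then other high-threat enemies, then one-hit kills, then corpses that
# block an archer). The helper below is an illustrative, simplified restatement of
# that prioritization pattern; it is not part of the original bot and only uses the
# enemy attributes already referenced above.
def pick_targets(enemy_formation):
    """Return the first non-empty target group, in descending priority order."""
    alive = [e for e in enemy_formation if not e.alreadyGoingToDie]
    priority_filters = [
        lambda e: e.threat >= 7,                                           # stress attackers first
        lambda e: e.threat >= 4,                                           # then other high threats
        lambda e: e.canBeKilledIn1Hit and not e.stunned and e.threat > 1,  # then easy kills
    ]
    for keep in priority_filters:
        targets = [e for e in alive if keep(e)]
        if targets:
            return targets
    return alive                                                           # fall back to whatever is left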
'''
Copyright 2021 The Microsoft DeepSpeed Team
'''
import types
import torch
import numpy as np
import torch.distributed as dist
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors


class OnebitLamb(torch.optim.Optimizer):
    """Implements the 1-bit Lamb algorithm. Currently GPU-only.
    For usage example please see https://www.deepspeed.ai/tutorials/onebit-lamb/
    For technical details please see our paper https://arxiv.org/abs/2104.06069.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups.
        lr (float, optional): learning rate. (default: 1e-3)
        freeze_step (int, optional): Number of steps for warmup (uncompressed)
            stage before we start using compressed communication. (default 100000)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square. (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability. (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        max_coeff(float, optional): maximum value of the lamb coefficient (default: 10.0)
        min_coeff(float, optional): minimum value of the lamb coefficient (default: 0.01)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False) NOT SUPPORTED in 1-bit Lamb!
        eps_inside_sqrt (boolean, optional): in the 'update parameters' step,
            adds eps to the bias-corrected second moment estimate before
            evaluating square root instead of adding it to the square root of
            second moment estimate as in the original paper. (default: False)
        cuda_aware (boolean, required): Set True if the underlying MPI implementation
            supports CUDA-Aware communication. (default: False)
        comm_backend_name (string, optional): Set to 'mpi' if needed. (default: 'nccl')
        coeff_beta (float, optional): coefficient used for computing
            running averages of lamb coefficient (default: 0.9) note that you may want to
            increase or decrease this beta depending on the freeze_step you choose, as
            1/(1 - coeff_beta) should be smaller than or equal to freeze_step
        factor_max (float, optional): maximum value of scaling factor to the frozen lamb
            coefficient during compression stage (default: 4.0)
        factor_min (float, optional): minimum value of scaling factor to the frozen lamb
            coefficient during compression stage (default: 0.5)
        factor_threshold (float, optional): threshold of how much the scaling factor can
            fluctuate between steps (default: 0.1)
    .. _Large Batch Optimization for Deep Learning\: Training BERT in 76 minutes:
        https://arxiv.org/abs/1904.00962
    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    ..
_On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ """ def __init__(self, params, deepspeed=None, lr=1e-3, freeze_step=100000, bias_correction=True, betas=(0.9, 0.999), eps=1e-8, eps_inside_sqrt=False, weight_decay=0., max_grad_norm=0., max_coeff=10.0, min_coeff=0.01, amsgrad=False, cuda_aware=False, comm_backend_name='nccl', coeff_beta=0.9, factor_max=4.0, factor_min=0.5, factor_threshold=0.1): if amsgrad: raise RuntimeError('1-bit Lamb does not support the AMSGrad variant.') defaults = dict(lr=lr, bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay, max_grad_norm=max_grad_norm, max_coeff=max_coeff, min_coeff=min_coeff) super(OnebitLamb, self).__init__(params, defaults) self.eps_mode = 0 if eps_inside_sqrt else 1 assert (dist.is_initialized()) self.deepspeed = deepspeed self.lamb_freeze_key = False self.initialize = False self.freeze_step = freeze_step self.cuda_aware = cuda_aware self.coeff_beta = coeff_beta self.factor_max = factor_max self.factor_min = factor_min self.factor_threshold = factor_threshold self.using_pipeline = False self.comm_backend_name = comm_backend_name # Empty initializer. Set handle based on the comm backend as follows. self.comm_backend_handle = None if self.comm_backend_name == 'nccl': TORCH_MAJOR = int(torch.__version__.split('.')[0]) TORCH_MINOR = int(torch.__version__.split('.')[1]) assert TORCH_MAJOR >= 1 and TORCH_MINOR >= 8, "Please use torch 1.8 or greater to enable NCCL backend in 1-bit Adam. Alternatively, please specify 'mpi' as the 'comm_backend_name' in config file to proceed with the MPI backend" assert dist.is_initialized() == True, "Please initialize the torch distributed backend." from deepspeed.runtime.comm.nccl import NcclBackend self.using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce') self.comm_backend_handle = NcclBackend(self.deepspeed.mpu) elif self.comm_backend_name == 'mpi': from deepspeed.runtime.comm.mpi import MpiBackend self.comm_backend_handle = MpiBackend(cuda_aware) self.size = self.comm_backend_handle.size self.divider = int(self.size * 8 / np.gcd(self.size, 8)) self.exp_avg_flat = [] self.dummy_exp_avg = {} self.corrected_tensor_sizes = [] self.server_chunk_sizes = [] self.worker_errors = [] self.server_errors = [] self.lamb_coeffs = [] def step(self, closure=None, grads=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. grads (list of tensors, optional): weight gradient to use for the optimizer update. If gradients have type torch.half, parameters are expected to be in type torch.float. (default: None) """ loss = None if closure is not None: loss = closure() if grads is None: grads_group = [None] * len(self.param_groups) # backward compatibility # assuming a list/generator of parameter means single group elif isinstance(grads, types.GeneratorType): grads_group = [grads] elif type(grads[0]) != list: grads_group = [grads] else: grads_group = grads #remove the previous stats del self.lamb_coeffs[:] if self.lamb_freeze_key: exp_avg_last_step = [] for group in self.param_groups: exp_avg_last_step.append( [self.state[p]['exp_avg'].detach().clone() for p in group['params']]) if 'scaling_coeff' not in self.state[self.param_groups[0]['params'][0]]: # Compute the scaling_coeff for each momentum at the end of warmup stage. # This is used to reduce compression error during compression stage. 
momentum_scales = [] for group in self.param_groups: momentum_scales.append([ (torch.norm(self.state[p]['exp_avg']) / np.sqrt(torch.numel(self.state[p]['exp_avg']))).item() for p in group['params'] ]) united_scale = sum([sum(x) for x in momentum_scales]) / sum( [len(x) for x in momentum_scales]) for i, group in enumerate(self.param_groups): for j, p in enumerate(group['params']): self.state[p][ 'scaling_coeff'] = united_scale / momentum_scales[i][j] for group, grads_this_group in zip(self.param_groups, grads_group): if grads_this_group is None: grads_this_group = [None] * len(group['params']) bias_correction = 1 if group['bias_correction'] else 0 for p, grad in zip(group['params'], grads_this_group): if p.grad is None and grad is None: continue if grad is None: grad = p.grad.data if grad.is_sparse: raise RuntimeError('1-bit Lamb does not support sparse gradients') state = self.state[p] # State initialization if len(state) == 0 or (len(state) == 1 and 'scaling_coeff' in state.keys()): state['step'] = 0 state['lamb_coeff_freeze'] = 0.0 state['last_factor'] = 1.0 # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p.data) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros_like(p.data) state['exp_avg_sq_fresh'] = torch.zeros_like(p.data) if not self.initialize: self.lamb_freeze_key = True exp_avg, exp_avg_sq, exp_avg_sq_fresh = state['exp_avg'], state['exp_avg_sq'], state['exp_avg_sq_fresh'] beta1, beta2 = group['betas'] max_coeff = group['max_coeff'] min_coeff = group['min_coeff'] state['step'] += 1 if self.lamb_freeze_key is False: # warmup stage, baseline Lamb optimization exp_avg.mul_(beta1).add_(1 - beta1, grad) exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) if state['step'] == self.freeze_step: exp_avg_sq_fresh.data = exp_avg_sq.detach().clone() grad = None if self.initialize: weight_norm = p.data.pow(2).sum().sqrt() update = exp_avg / (exp_avg_sq.sqrt() + group['eps']) if group['weight_decay'] > 0.0: update += group['weight_decay'] * p.data update_norm = update.pow(2).sum().sqrt() lamb_coeff = 1.0 if weight_norm != 0 and update_norm != 0: lamb_coeff = (weight_norm / update_norm).item() if lamb_coeff > max_coeff: lamb_coeff = max_coeff if lamb_coeff < min_coeff: lamb_coeff = min_coeff if lamb_coeff != 1.0: state['lamb_coeff_freeze'] = self.coeff_beta * state[ 'lamb_coeff_freeze'] + (1 - self.coeff_beta) * lamb_coeff self.lamb_coeffs.append(lamb_coeff) with torch.no_grad(): p.add_(-group['lr'] * lamb_coeff * update) else: # compression stage, update each momentum locally, then # communicate based on the compressed_allreduce below if self.initialize: exp_avg.mul_(beta1).add_(1 - beta1, grad) exp_avg.mul_(self.state[p]['scaling_coeff']) grad = None # init fused momentum if len(self.exp_avg_flat) == 0: momentum_groups = [] tensor_size = 0 for group in self.param_groups: for p in group['params']: momentum_groups.append(self.state[p]['exp_avg']) tensor_size += torch.numel(p.data) corrected_tensor_size = tensor_size if tensor_size % (self.size * self.divider) != 0: difference = ((self.size * self.divider) - (tensor_size % (self.size * self.divider))) corrected_tensor_size += difference self.dummy_exp_avg[0] = torch.zeros( difference, device=momentum_groups[0].data.device) momentum_groups.append(self.dummy_exp_avg[0]) self.corrected_tensor_sizes.append(corrected_tensor_size) self.server_chunk_sizes.append(corrected_tensor_size // self.size) self.exp_avg_flat.append( _flatten_dense_tensors([p.detach().clone() for p in 
momentum_groups])) updated_params = _unflatten_dense_tensors(self.exp_avg_flat[0], momentum_groups) for p, q in zip(momentum_groups, updated_params): p.data = q.data if self.initialize and len(self.worker_errors) == 0: torch.cuda.empty_cache() for i in range(len(self.exp_avg_flat)): self.worker_errors.append( torch.zeros(self.corrected_tensor_sizes[i], device=self.exp_avg_flat[i].device)) self.server_errors.append( torch.zeros(self.server_chunk_sizes[i], device=self.exp_avg_flat[i].device)) torch.cuda.empty_cache() if self.lamb_freeze_key: if self.size > 1: for i in range(len(self.exp_avg_flat)): if not self.initialize: torch.cuda.empty_cache() self.worker_errors.append( torch.zeros(self.corrected_tensor_sizes[i], device=self.exp_avg_flat[i].device)) self.server_errors.append( torch.zeros(self.server_chunk_sizes[i], device=self.exp_avg_flat[i].device)) torch.cuda.empty_cache() if torch.distributed.get_rank() == 0: print("Cupy Buffers Initialized Successfully.") self.comm_backend_handle.compressed_allreduce( self.exp_avg_flat[i], self.worker_errors[0], self.server_errors[0], self.deepspeed.local_rank) if torch.distributed.get_rank() == 0: print('Pop out errors', flush=True) del self.worker_errors[:] del self.server_errors[:] else: self.comm_backend_handle.compressed_allreduce( self.exp_avg_flat[i], self.worker_errors[i], self.server_errors[i], self.deepspeed.local_rank) if self.lamb_freeze_key and self.initialize: for i, group in enumerate(self.param_groups): bias_correction = 1 if group['bias_correction'] else 0 for j, p in enumerate(group['params']): state = self.state[p] exp_avg, exp_avg_sq, exp_avg_sq_fresh = state['exp_avg'], state['exp_avg_sq'], state['exp_avg_sq_fresh'] beta1, beta2 = group['betas'] exp_avg.div_(self.state[p]['scaling_coeff']) # Because 1-bit compression cannot represent exact zero, it is required to # provide a momentum mask for those params that have constant exact zeros in their # momentums, otherwise the compression error would keep accumulating. # For example, for BERT pre-training seq 128, bert.embeddings.position_embeddings.weight # always have exact zeros in its momentum for row 129 to 512, because it only
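# A hedged configuration sketch. In practice DeepSpeed builds this optimizer from its
# JSON config via deepspeed.initialize() rather than by direct construction (the
# constructor above asserts that torch.distributed is already initialized and expects
# a DeepSpeed engine for the NCCL backend). The parameter names mirror the __init__
# arguments shown above; the values and the "OneBitLamb" type string are illustrative
# and should be checked against the tutorial linked in the class docstring.
ds_config = {
    "train_batch_size": 4096,
    "optimizer": {
        "type": "OneBitLamb",
        "params": {
            "lr": 1e-3,
            "freeze_step": 10000,        # warmup steps before 1-bit compression starts
            "betas": [0.9, 0.999],
            "weight_decay": 0.01,
            "max_coeff": 10.0,
            "min_coeff": 0.01,
            "cuda_aware": False,
            "comm_backend_name": "nccl",
            "coeff_beta": 0.9,
            "factor_max": 4.0,
            "factor_min": 0.5,
            "factor_threshold": 0.1,
        },
    },
}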
# # utils module - common functions for reportbug UIs # Written by <NAME> <<EMAIL>> # Copyright (C) 1999-2008 <NAME> # Copyright (C) 2008-2014 <NAME> <<EMAIL>> # # This program is freely distributable per the following license: # ## Permission to use, copy, modify, and distribute this software and its ## documentation for any purpose and without fee is hereby granted, ## provided that the above copyright notice appears in all copies and that ## both that copyright notice and this permission notice appear in ## supporting documentation. ## ## I DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL ## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL I ## BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY ## DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, ## WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ## ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS ## SOFTWARE. import sys import os import re import platform try: import pwd from tempfiles import TempFile, tempfile_prefix, cleanup_temp_file except ImportError, e: if platform.system() == 'Windows': pass else: print e sys.exit(1) import commands import shlex import rfc822 import socket import subprocess from urlutils import open_url from string import ascii_letters, digits # Paths for dpkg DPKGLIB = '/var/lib/dpkg' AVAILDB = os.path.join(DPKGLIB, 'available') STATUSDB = os.path.join(DPKGLIB, 'status') # Headers other than these become email headers for debbugs servers PSEUDOHEADERS = ('Package', 'Source', 'Version', 'Severity', 'File', 'Tags', 'Justification', 'Followup-For', 'Owner', 'User', 'Usertags', 'Forwarded', 'Control') MODES = {'novice': 'Offer simple prompts, bypassing technical questions.', 'standard': 'Offer more extensive prompts, including asking about ' 'things that a moderately sophisticated user would be expected to ' 'know about Debian.', 'advanced' : 'Like standard, but assumes you know a bit more about ' 'Debian, including "incoming".', 'expert': 'Bypass most handholding measures and preliminary triage ' 'routines. This mode should not be used by people unfamiliar with ' 'Debian\'s policies and operating procedures.'} MODELIST = ['novice', 'standard', 'advanced', 'expert'] for mode in MODELIST: exec 'MODE_%s=%d' % (mode.upper(), MODELIST.index(mode)) del mode # moved here since it needs the MODE_* vars to be defined import debbugs # it needs to be imported after debbugs import ui.text_ui as ui from reportbug.ui import AVAILABLE_UIS NEWBIELINE = """Dear Maintainer, *** Reporter, please consider answering these questions, where appropriate *** * What led up to the situation? * What exactly did you do (or not do) that was effective (or ineffective)? * What was the outcome of this action? * What outcome did you expect instead? 
*** End of the template - remove these template lines ***""" fhs_directories = ['/', '/usr', '/usr/share', '/var', '/usr/X11R6', '/usr/man', '/usr/doc', '/usr/bin'] # A map between suites and distributions names SUITES2DISTS = {'squeeze': 'oldstable', 'wheezy': 'stable', 'jessie': 'testing', 'sid': 'unstable', 'experimental': 'experimental'} def realpath(filename): filename = os.path.abspath(filename) bits = filename.split('/') for i in range(2, len(bits)+1): component = '/'.join(bits[0:i]) if component in fhs_directories: continue if os.path.islink(component): resolved = os.readlink(component) (dir, file) = os.path.split(component) resolved = os.path.normpath(os.path.join(dir, resolved)) newpath = apply(os.path.join, [resolved] + bits[i:]) return realpath(newpath) return filename pathdirs = ['/usr/sbin', '/usr/bin', '/sbin', '/bin', '/usr/X11R6/bin', '/usr/games'] def search_path_for(filename): d, f = os.path.split(filename) if d: return realpath(filename) path = os.environ.get("PATH", os.defpath).split('/') for d in pathdirs: if not d in path: path.append(d) for d in path: fullname = os.path.join(d, f) if os.path.exists(fullname): return realpath(fullname) return None def which_editor(specified_editor=None): """ Determine which editor program to use. :parameters: `specified_editor` Specified editor for reportbug, to be used in preference to other settings. :return value: Command to invoke for selected editor program. """ debian_default_editor = "/usr/bin/sensible-editor" for editor in [ specified_editor, os.environ.get("VISUAL"), os.environ.get("EDITOR"), debian_default_editor]: if editor: break return editor def glob_escape(filename): filename = re.sub(r'([*?\[\]])', r'\\\1', filename) return filename def search_pipe(searchfile, use_dlocate=True): arg = commands.mkarg(searchfile) if use_dlocate and os.path.exists('/usr/bin/dlocate'): pipe = os.popen('COLUMNS=79 dlocate -S %s 2>/dev/null' % arg) else: use_dlocate = False pipe = os.popen('COLUMNS=79 dpkg --search %s 2>/dev/null' % arg) return (pipe, use_dlocate) def query_dpkg_for(filename, use_dlocate=True): try: x = os.getcwd() except OSError: os.chdir('/') searchfilename = glob_escape(filename) (pipe, dlocate_used) = search_pipe(searchfilename, use_dlocate) packages = {} for line in pipe: line = line.strip() # Ignore diversions if 'diversion by' in line: continue (package, path) = line.split(': ', 1) path = path.strip() packlist = package.split(', ') for package in packlist: if packages.has_key(package): packages[package].append(path) else: packages[package] = [path] pipe.close() # Try again without dlocate if no packages found if not packages and dlocate_used: return query_dpkg_for(filename, use_dlocate=False) return filename, packages def find_package_for(filename, pathonly=False): """Find the package(s) containing this file.""" packages = {} # tries to match also files in /var/lib/dpkg/info/ if filename.startswith('/var/lib/dpkg/info/'): dpkg_info = re.compile('/var/lib/dpkg/info/(.+)\.[^.]+') m = dpkg_info.match(filename) # callee want a dict as second pair element... 
packages[m.group(1)]='' return (filename, packages) if filename[0] == '/': fn, pkglist = query_dpkg_for(filename) if pkglist: return fn, pkglist newfilename = search_path_for(filename) if pathonly and not newfilename: return (filename, None) return query_dpkg_for(newfilename or filename) def find_rewritten(username): for filename in ['/etc/email-addresses']: if os.path.exists(filename): try: fp = file(filename) except IOError: continue for line in fp: line = line.strip().split('#')[0] if not line: continue try: name, alias = line.split(':') if name.strip() == username: return alias.strip() except ValueError: print 'Invalid entry in %s' % filename return None def check_email_addr(addr): """Simple check for email validity""" if '@' not in addr: return False if addr.count('@') != 1: return False localpart, domainpart = addr.split('@') if localpart.startswith('.') or localpart.endswith('.'): return False if '.' not in domainpart: return False if domainpart.startswith('.') or domainpart.endswith('.'): return False return True def get_email_addr(addr): addr = rfc822.AddressList(addr) return addr.addresslist[0] def get_email(email='', realname=''): return get_email_addr(get_user_id(email, realname)) def get_user_id(email='', realname='', charset='utf-8'): uid = os.getuid() info = pwd.getpwuid(uid) email = (os.environ.get('REPORTBUGEMAIL', email) or os.environ.get('DEBEMAIL') or os.environ.get('EMAIL')) email = email or find_rewritten(info[0]) or info[0] if '@' not in email: if os.path.exists('/etc/mailname'): domainname = file('/etc/mailname').readline().strip() else: domainname = socket.getfqdn() email = email+'@'+domainname # Handle EMAIL if it's formatted as 'Bob <bob@host>'. if '<' in email or '(' in email: realname, email = get_email_addr(email) if not realname: realname = (os.environ.get('DEBFULLNAME') or os.environ.get('DEBNAME') or os.environ.get('NAME')) if not realname: realname = info[4].split(',', 1)[0] # Convert & in gecos field 4 to capitalized logname: #224231 realname = realname.replace('&', info[0].upper()) if not realname: return email # Decode the realname from the charset - # but only if it is not already in Unicode if isinstance(realname, str): realname = realname.decode(charset, 'replace') if re.match(r'[\w\s]+$', realname): return u'%s <%s>' % (realname, email) addr = rfc822.dump_address_pair( (realname, email) ) if isinstance(addr, str): addr = addr.decode('utf-8', 'replace') return addr statuscache = {} def get_package_status(package, avail=False): if not avail and package in statuscache: return statuscache[package] versionre = re.compile('Version: ') packagere = re.compile('Package: ') priorityre = re.compile('Priority: ') dependsre = re.compile('(Pre-)?Depends: ') recsre = re.compile('Recommends: ') suggestsre = re.compile('Suggests: ') conffilesre = re.compile('Conffiles:') maintre = re.compile('Maintainer: ') statusre = re.compile('Status: ') originre = re.compile('Origin: ') bugsre = re.compile('Bugs: ') descre = re.compile('Description: ') fullre = re.compile(' ') srcre = re.compile('Source: ') sectionre = re.compile('Section: ') pkgversion = pkgavail = maintainer = status = origin = None bugs = vendor = priority = desc = src_name = section = None conffiles = [] fulldesc = [] depends = [] recommends = [] suggests = [] confmode = False state = '' try: x = os.getcwd() except OSError: os.chdir('/') packarg = commands.mkarg(package) if avail: output = commands.getoutput( "COLUMNS=79 dpkg --print-avail %s 2>/dev/null" % packarg) else: output = commands.getoutput( 
"COLUMNS=79 dpkg --status %s 2>/dev/null" % packarg) # dpkg output is in UTF-8 format output = output.decode('utf-8', 'replace') for line in output.split(os.linesep): line = line.rstrip() if not line: continue if confmode: if line[:2] != ' /': confmode = False else: # re is used to identify also conffiles with spaces in the name conffiles = conffiles + [re.findall(' (.+) ([^ ]+)$', line)[0]] if versionre.match(line): (crud, pkgversion) = line.split(": ", 1) elif statusre.match(line): (crud, status) = line.split(": ", 1) elif priorityre.match(line): (crud, priority) = line.split(": ", 1) elif packagere.match(line): (crud, pkgavail) = line.split(": ", 1) elif originre.match(line): (crud, origin) = line.split(": ", 1) elif bugsre.match(line): (crud, bugs) = line.split(": ", 1) elif descre.match(line): (crud, desc) = line.split(": ", 1) elif dependsre.match(line): (crud, thisdepends) = line.split(": ", 1) # Remove versioning crud thisdepends = [[y.split()[0] for y in x.split('|')] for x in (thisdepends.split(', '))] depends.extend(thisdepends) elif recsre.match(line): (crud, thisdepends) = line.split(": ",
<reponame>cgiraldo/platform-data-mgmnt """ Copyright (c) 2016 Cisco and/or its affiliates. This software is licensed to you under the terms of the Apache License, Version 2.0 (the "License"). You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 The code, technical concepts, and all information contained herein, are the property of Cisco Technology, Inc.and/or its affiliated entities, under various laws including copyright, international treaties, patent, and/or contract. Any use of the material herein must be in accordance with the terms of the License. All rights not expressly granted by the License are reserved. Unless required by applicable law or agreed to separately in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. Purpose: Run jobs periodically to clean log files and manage datasets as per policy """ import json import logging from logging.config import fileConfig import os import posixpath as path import re import subprocess import time import traceback from functools import partial from functools import wraps import happybase import swiftclient from pyhdfs import HdfsClient, HdfsFileNotFoundException import boto.s3 from endpoint import Platform NEG_SIZE = 2 FNULL = open(os.devnull, 'w') def delete(hdfs, file_path): """ Delete file from HDFS Filesystem :param hdfs: :param file_path: :return: """ logging.debug("Delete HDFS File:%s", file_path) hdfs.delete(file_path) def archive(container_path, hdfs, file_path): """ Archive contents of file onto swift container :param container_path: :param hdfs: :param file_path: :return: """ logging.info("Archive file onto swift container %s", file_path) archive_path = container_path try: file_date = re.findall(r"=(\w*)", file_path) if file_date: subprocess.call(['hdfs', 'dfs', '-mkdir', '-p', container_path + '/' + file_date[0]], stderr=FNULL) archive_path = path.join(container_path, file_date[0], '-'.join(file_date) + '-' + path.basename(file_path)) logging.info("swift archive path %s", archive_path) subprocess.check_output(['hdfs', 'dfs', '-cp', file_path, archive_path]) delete(hdfs, file_path) except subprocess.CalledProcessError as cpe: logging.error('CPE:failed to archive {%s} with following error{%s}', file_path, cpe.message) except ValueError as value_error: logging.error('VE:failed to archive {%s} with following error{%s}', file_path, value_error.message) def check_threshold(): """ Check threshold value :return: True """ def decorator(func): """ Decorator :param func: :return: """ @wraps(func) def wrapper(*args, **kwargs): """ Wrapper function :param args: :param kwargs: :return: """ result = func(*args, **kwargs) logging.debug("file modification time={%s} and age={%s}", result, args[0] * 1000) if result <= (args[0] * 1000): return True return wrapper return decorator @check_threshold() def extract_age(retention_age, hdfs, name): # pylint: disable=unused-argument """ Extract age of a HDFS file, retention age is passed on as argument to decorator function and used by check_threshold :param retention_age: Age specified since 1970 determines whether file should be retained or not :param hdfs: Object reference to access HDFS file system :param name: name of file :return: Last modified """ last_modified = hdfs.get_file_status(name).modificationTime return last_modified def extract_size(hdfs, name): """ Extract size of a HDFS file. 
:param hdfs: :param name: :return: """ file_size = hdfs.get_file_status(name)['length'] return file_size def error(exception): """ Callback function used HDFS module :param exception: Exception object :return: """ logging.warn("Error in HDFS API Invocation error msg->{%s}", exception.message) def clean_empty_dirs(hdfs, root, dirs): for dir_entry in dirs: abspath = path.join(root, dir_entry) if hdfs.get_content_summary(abspath).fileCount < 1: # The directory will not be removed if not empty logging.debug("Delete directory:->{%s} as its empty", dir_entry) hdfs.delete(abspath) def cleanup_on_age(hdfs, cmd, clean_path, age): """ Clean up files when it ages as determined by threshold :param hdfs: hdfs instance :param cmd: cmd to run when threshold is reached :param clean_path: repo path :param age: Threshold value in this case age :return: None """ dir_list = clean_path if not isinstance(clean_path, list): dir_list = list() dir_list.append(clean_path) for dir_to_clean in dir_list: for root, dirs, files in hdfs.walk(dir_to_clean, topdown=False, onerror=error): logging.info("Root:{%s}->Dirs:{%s}->Files:{%s}", root, dirs, files) for filename in files: abspath = path.join(root, filename) if extract_age(age, hdfs, abspath): cmd(abspath) clean_empty_dirs(hdfs, root, dirs) def cleanup_on_size(hdfs, cmd, clean_path, size_threshold): """ Clean up hdfs data directories when threshold is reached :param hdfs: hdfs instance for file walk :param cmd: cmd to run when threshold is reached. It is usually archive or delete command :param clean_path: Path to clean :param size_threshold: Threshold value for file repo :return: None """ logging.info("Clean following dirs on basis of size [{%s}]", clean_path) dir_list = clean_path if not isinstance(clean_path, list): dir_list = list() dir_list.append(clean_path) for clean_dir in dir_list: try: space_consumed = hdfs.get_content_summary(clean_dir).length logging.info("Space consumed by directory{%s} on filesystem:{%d} policy threshold:{%d}", clean_dir, space_consumed, size_threshold) if space_consumed > size_threshold: for root, dirs, files in hdfs.walk(clean_dir, topdown=False, onerror=error): logging.info("Root:{%s}->Dirs:{%s}->Files:{%s}", root, dirs, files) for item in files: if space_consumed <= size_threshold: break # Read the file-size from HDFS, remove file and update the space_consumed abspath = path.join(root, item) file_size = extract_size(hdfs, abspath) cmd(abspath) space_consumed -= file_size clean_empty_dirs(hdfs, root, dirs) except HdfsFileNotFoundException as hdfs_file_not_found_exception: logging.warn("{%s}", hdfs_file_not_found_exception.message) except Exception as exception: logging.warn("Exception in clean directories possibly dir doesnt exist{%s}", exception.message) def cleanup_spark(spark_path): """ Clean up spark log and app files :param spark_path: filesystem path that contains spark related files :return: """ logging.info('Cleaning spark streaming cruft') reg = re.compile('/(application_[0-9]*_[0-9]*)(.inprogress)*$') for dir_to_consider in spark_path: logging.info('cleaning up %s', dir_to_consider) try: sub_dirs = subprocess.check_output(['hadoop', 'fs', '-ls', dir_to_consider], stderr=FNULL) except subprocess.CalledProcessError: logging.warn('failed to ls %s', dir_to_consider) continue for dir_path_line in sub_dirs.splitlines(): search_match = reg.search(dir_path_line) if search_match: app_id = search_match.group(1) try: app_status = subprocess.check_output(['yarn', 'application', '-status', app_id], stderr=FNULL) except 
subprocess.CalledProcessError: logging.warn( 'app probably not known to resource manager for some reason (like yarn was ' 'restarted)') app_status = 'State : FINISHED' dir_path_line_parts = dir_path_line.split(' ') dir_path_line_parts = filter(None, dir_path_line_parts) dir_path = "%s" % ''.join(dir_path_line_parts[7:]) if 'State : FINISHED' in app_status or 'State : FAILED' in app_status or \ 'State : KILLED' in app_status: logging.warn('delete: %s', dir_path) try: subprocess.check_output( ['hadoop', 'fs', '-rm', '-r', '-f', '-skipTrash', dir_path]) except subprocess.CalledProcessError: logging.warn('failed to delete: %s', dir_path) else: logging.warn('keep: %s', dir_path) def read_datasets_from_hbase(table_name, hbase_host): """ Connect to hbase table and return list of datasets :param table_name: :param hbase_host: :return: """ logging.info("Connecting to database to retrieve datasets ") datasets = list() try: connection = happybase.Connection(hbase_host) connection.open() table = connection.table(table_name, ) logging.info('connecting to hbase to read data sets') for key, data in table.scan(): logging.debug("Looking for next data in HBase") dataset = dict(name=key, path=data['cf:path'], policy=data['cf:policy'], retention=data['cf:retention'], mode=data['cf:mode']) if dataset['policy'] == "size": dataset['retention'] = int(dataset['retention']) * 1024 * 1024 * 1024 datasets.append(dataset) elif dataset['policy'] == "age": # from days to seconds age_in_secs = int(dataset['retention']) * 86400 dataset['retention'] = int(time.time() - age_in_secs) datasets.append(dataset) else: logging.error("Invalid dataset entry in HBase") except Exception as exception: logging.warn("Exception thrown for datasets walk on HBASE->'{%s}'", exception.message) return datasets class JOB(object): """ The Clean up job instance. It takes in strategy and run as part of schedule or cron """ def __init__(self, name, hdfs, strategy, cmd, repo_path, threshold): self.name = name self.hdfs = hdfs self.strategy = strategy self.cmd = cmd self.path = repo_path self.threshold = threshold def run(self): """ Run specific job :return: """ if hasattr(self.strategy, '__call__'): self.strategy(self.hdfs, self.cmd, self.path, self.threshold) def main(): """ Main function of job cleanup module :return: none """ # instantiate platform for Cloudera # need to be removed, once spark refactoring happens jobs = list() with file('properties.json') as property_file: properties = json.load(property_file) platform = Platform.factory(properties['hadoop_distro']) # discover endpoints endpoints = platform.discover(properties) assert endpoints fileConfig('logconf.ini') logging.info("Discovered following endpoints from cluster manager{%s}", endpoints) # setup endpoints hdfs = HdfsClient(endpoints["HDFS"].geturl(), user_name='hdfs') hbase = endpoints["HBASE"].geturl() # Create s3 or swift bucket for archive purposes try: if properties['s3_archive_region'] != '': container_type = 's3' s3conn = boto.s3.connect_to_region(properties['s3_archive_region'], aws_access_key_id=properties['s3_archive_access_key'], aws_secret_access_key=properties['s3_archive_secret_access_key']) if properties['s3_archive_region'] == "us-east-1": # use "US Standard" region. 
workaround for https://github.com/boto/boto3/issues/125 s3conn.create_bucket(properties['container_name']) else: s3conn.create_bucket(properties['container_name'], location=properties['s3_archive_region']) else: container_type = 'swift' swift_conn = swiftclient.client.Connection(auth_version='2', user=properties['swift_user'], key=properties['swift_key'], tenant_name=properties['swift_account'], authurl=properties['swift_auth_url'], timeout=30) swift_conn.put_container(properties['container_name']) swift_conn.close() except Exception as ex: # the create container operations are idempotent so would only expect genuine errors here logging.error("Failed to create %s container %s", container_type, properties['container_name']) logging.error(traceback.format_exc(ex)) # create partial functions delete_cmd = partial(delete, hdfs) archive_cmd = partial(archive, properties['swift_repo'], hdfs) # clean spark directors spark_streaming_dirs_to_clean = properties['spark_streaming_dirs_to_clean'] cleanup_spark(spark_streaming_dirs_to_clean) # general directories to clean general_dirs_to_clean = properties['general_dirs_to_clean'] job_common_dirs = JOB('clean_general_dir', hdfs, cleanup_on_size, delete_cmd, general_dirs_to_clean, NEG_SIZE) jobs.append(job_common_dirs) old_dirs_to_clean = properties['old_dirs_to_clean'] for entry in old_dirs_to_clean: print entry['name'] age = int(time.time() - entry['age_seconds']) job_old_dirs = JOB('clean_old_dir', hdfs, cleanup_on_age, delete_cmd, entry['name'], age) jobs.append(job_old_dirs) # # Read all datasets data_sets = read_datasets_from_hbase(properties['datasets_table'], hbase) for item in data_sets: logging.debug("dataset item being scheduled {%s}", item) cmd = delete_cmd if 'mode' in item
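# A minimal, self-contained sketch of the threshold-decorator pattern used by
# check_threshold()/extract_age() above: the decorated function returns an HDFS
# modification time in milliseconds, and the decorator turns it into a predicate
# that is True when the file is older than the retention cutoff (seconds since
# the epoch).  FakeHdfs is a stand-in for pyhdfs.HdfsClient so the example runs
# without a cluster; it is not part of the original module.
import time
from functools import wraps


def check_threshold_sketch():
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # args[0] is the cutoff in seconds; func returns milliseconds
            return func(*args, **kwargs) <= args[0] * 1000
        return wrapper
    return decorator


class FakeHdfs(object):
    """Stand-in for HdfsClient: every file looks ten days old."""
    def get_file_status(self, name):
        ten_days_ms = int((time.time() - 10 * 86400) * 1000)
        return type('Status', (), {'modificationTime': ten_days_ms})()


@check_threshold_sketch()
def extract_age_sketch(retention_cutoff, hdfs, name):
    # retention_cutoff is consumed by the decorator, not here
    return hdfs.get_file_status(name).modificationTime


if __name__ == '__main__':
    hdfs = FakeHdfs()
    cutoff_7d = int(time.time() - 7 * 86400)    # files older than 7 days qualify
    cutoff_30d = int(time.time() - 30 * 86400)  # files older than 30 days do not
    print(extract_age_sketch(cutoff_7d, hdfs, '/data/part-0000'))   # True
    print(extract_age_sketch(cutoff_30d, hdfs, '/data/part-0000'))  # False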
<gh_stars>0 import sys sys.path.append('..') import torch.multiprocessing as multiprocessing try: multiprocessing.set_start_method('spawn') except RuntimeError: pass from contextlib import closing import time import functools import paramparse import pandas as pd from pprint import pformat from torch.utils.data import DataLoader from YOLOv3TestParams import YOLOv3TestParams from yolov3_models import * from yolov3_utils.datasets import * from yolov3_utils.utils import * from labelling_tool.tracking.DaSiamRPN.DaSiamRPN import DaSiamRPN # from labelling_tool.tracking.siamfc.SiamFC import SiamFC from labelling_tool.tracking.SiamMask.SiamMask import SiamMask from labelling_tool.tracking.Utilities import drawBox def runDetector(_idx, model, imgs, device, conf_thresh, nms_type, nms_thresh, # trackers=None, curr_frame=None ): # if _idx > 0: # # _start_t = time.time() # tracker = trackers[_idx - 1] # tracker.update(curr_frame) # # _end_t = time.time() # # remove_tracker = 0 # # if tracker.confidence < track_thresh: # # # trackers_to_remove.append(tracker) # # remove_tracker = 1 # # print('Removing tracker {} with confidence: {}'.format( # # tracker.target_id, tracker.confidence)) # return None, None, None, None _start_t = time.time() # targets = targets.to(device) # Plot images with bounding boxes # if batch_i == 0 and not os.path.exists('test_batch0.jpg'): # plot_images(imgs=imgs, targets=targets, fname='test_batch0.jpg') # Run model inf_out, train_out = model(imgs) # inference and training outputs print(inf_out) # Compute loss # if hasattr(model, 'hyp'): # if model has loss hyperparameters # loss_i, _ = compute_loss(train_out, targets, model) # loss += loss_i.item() _end_t = time.time() # Run NMS output = non_max_suppression(inf_out, conf_thres=conf_thresh, nms_thres=nms_thresh, nms_style=nms_type) nms_end_t = time.time() return output, _start_t, _end_t, nms_end_t def test(opt, model=None): """ :param YOLOv3TestParams opt: :param model: :return: """ weights_base = os.path.basename(opt.weights) weights_dir = os.path.dirname(opt.weights) test_path_base = os.path.basename(opt.test_path) if not opt.save_dir: prefix = os.path.splitext(weights_base)[0] if opt.out_suffix: prefix = '{}_{}'.format(prefix, opt.out_suffix) opt.save_dir = '{}_on_{}'.format(prefix, os.path.splitext(test_path_base)[0]) opt.save_dir = os.path.join(weights_dir, opt.save_dir) if not os.path.isdir(opt.save_dir): os.makedirs(opt.save_dir) _tracker_types = opt.help['tracker_type'] if opt.tracker_type: if opt.batch_size > 1: raise IOError('Batch size must be 1 to use tracking') _tracker_type = [k for k in _tracker_types if opt.tracker_type in _tracker_types[k]] if not _tracker_type: raise IOError('Invalid tracker_type: {}'.format(opt.tracker_type)) _tracker_type = _tracker_type[0] # print('Tracking is enabled') #if _tracker_type == 'SiamFC': # create_tracker = functools.partial(SiamFC, params=opt.siam_fc) # print('Using SiamFC tracker') if _tracker_type == 'DaSiamRPN': create_tracker = functools.partial(DaSiamRPN, params=opt.da_siam_rpn, logger=None, ) print('Using DaSiamRPN tracker') elif _tracker_type == 'SiamMask': create_tracker = functools.partial(SiamMask, params=opt.siam_mask, ) print('Using SiamMask tracker') else: raise IOError('Invalid tracker_type: {}'.format(opt.tracker_type)) max_target_id = 0 print('Saving csv results to: {}'.format(opt.save_dir)) nms_type = opt.nms_type.upper() conf_thresh = opt.conf_thresh nms_thresh = opt.nms_thresh if model is None: device = torch_utils.select_device() # Initialize model model = 
Darknet(opt.net_cfg, opt.img_size).to(device) # Load weights if opt.weights.endswith('.pt'): # pytorch format model.load_state_dict(torch.load(opt.weights, map_location=device)['model']) else: # darknet format _ = load_darknet_weights(model, opt.weights) if torch.cuda.device_count() > 1: model = nn.DataParallel(model) else: device = next(model.parameters()).device # get model device # Configure run data_cfg = parse_data_cfg(opt.data_cfg) nc = int(data_cfg['classes']) # number of classes # test_path = data_cfg['test'] # path to test images names = load_classes(data_cfg['names']) # class names # Dataloader dataset = LoadImagesAndLabels(opt.test_path, opt.img_size, opt.batch_size, rect=False, sort_files=True, elk_vs_all=opt.elk_vall) dataloader = DataLoader(dataset, batch_size=opt.batch_size, num_workers=4, pin_memory=True, collate_fn=dataset.collate_fn) # seen = 0 model.eval() # # coco91class = coco80_to_coco91_class() # print(('%20s' + '%10s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP', 'F1')) loss, p, r, f1, mp, mr, map, mf1 = 0., 0., 0., 0., 0., 0., 0., 0. # jdict, stats, ap, ap_class = [], [], [], [] csv_raw = [] prev_eval_seq = '' seq_id = 0 n_frames = len(dataset.img_files) frame_id = 0 avg_fps = avg_nms_fps = avg_tracking_fps = avg_overall_fps = 0 seq_n_frames = 0 prev_tracker_frame_id = -1 trackers = [] _pause = 1 tracking_fps = 0 print('Processing {} sequences'.format(dataset.n_sequences)) for batch_i, (orig_imgs, imgs, targets, paths, shapes) in enumerate(dataloader): # tqdm(dataloader, desc='Running on {} image batches'.format(batch_size))) actual_batch_size, _, height, width = imgs.shape # batch size, channels, height, width curr_frame = orig_imgs[0] imgs = imgs.to(device) if trackers: n_trackers = len(trackers) # n_ops = n_trackers + 1 # n_threads = min(n_ops, multiprocessing.cpu_count()) # print('Running {} trackers and detector using {} threads'.format( # n_trackers, n_threads)) # combined_start_t = time.time() # with closing(multiprocessing.Pool(n_threads)) as pool: # combined_out_list = pool.map(functools.partial( # runDetector, # # model=model, # # imgs=imgs, # # device=device, # # conf_thresh=opt.conf_thresh, # # nms_type=nms_type, # # nms_thresh=opt.nms_thresh, # # trackers=trackers, # # curr_frame=curr_frame, # ), range(n_ops)) # combined_end_t = time.time() # combined_fps = 1.0 / (combined_end_t - combined_start_t) # print('Combined fps: {:.4f}'.format(combined_fps)) # output, _start_t, _end_t, nms_end_t = combined_out_list[0] output, _start_t, _end_t, nms_end_t = runDetector(0, model, imgs, device, conf_thresh, nms_type, nms_thresh ) removed_target_ids = [] combined_start_t = time.time() # trackers_to_remove = [] # print('before: n_trackers: {}'.format(n_trackers)) for i, tracker in enumerate(trackers): tracker.update(curr_frame) if tracker.confidence < opt.track_thresh: # trackers_to_remove.append(tracker) removed_target_ids.append(tracker.target_id) if opt.verbose: print('Removing tracker {} with confidence: {}'.format( tracker.target_id, tracker.confidence)) combined_end_t = time.time() # track_t = (combined_end_t - combined_start_t) - (_end_t - _start_t) track_t = combined_end_t - combined_start_t try: tracking_fps = 1.0 / float(track_t) except ZeroDivisionError: tracking_fps = 0 trackers = [tracker for tracker in trackers if tracker.target_id not in removed_target_ids] else: output, _start_t, _end_t, nms_end_t = runDetector(0, model, imgs, device, conf_thresh, nms_type, nms_thresh ) # print('\noutput:\n{}'.format(pformat(output))) # 
print('\npaths:\n{}'.format(pformat(paths))) # Statistics per image for si, pred in enumerate(output): # labels = targets[targets[:, 0] == si, 1:] # nl = len(labels) # tcls = labels[:, 0].tolist() if nl else [] # target class # seen += 1 curr_frame = orig_imgs[si] eval_file = paths[si] eval_seq = os.path.dirname(eval_file) if not prev_eval_seq: prev_eval_seq = eval_seq seq_id += 1 seq_n_frames = dataset.seq_to_n_frames[prev_eval_seq] print('\nProcessing sequence {}/{}: {}'.format(seq_id, dataset.n_sequences, prev_eval_seq)) seq_frame_id = 0 prev_tracker_frame_id = -1 if eval_seq != prev_eval_seq: if prev_eval_seq: # print('Done sequence {} with {} frames'.format(prev_eval_seq, len(csv_raw))) eval_seq_name = os.path.basename(prev_eval_seq) csv_file_name = os.path.join(opt.save_dir, '{}.csv'.format(eval_seq_name)) print('\nWriting csv data for {} frames to: {}\n'.format(len(csv_raw), csv_file_name)) pd.DataFrame(csv_raw).to_csv(csv_file_name) csv_raw = [] trackers = [] prev_eval_seq = eval_seq seq_id += 1 prev_tracker_frame_id = -1 seq_frame_id = 0 seq_n_frames = dataset.seq_to_n_frames[prev_eval_seq] print('\nProcessing sequence {}/{}: {}'.format(seq_id, dataset.n_sequences, prev_eval_seq)) if pred is None: print('\nNone pred for {}'.format(eval_file)) pred = [] n_raw_dets = 0 else: # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ... height, width = imgs[si].shape[1:3] # image_id = int(Path(paths[si]).stem.split('_')[-1]) box = pred[:, :4].clone() # xyxy scale_coords(imgs[si].shape[1:], box, shapes[si]) # to original shape # box = xyxy2xywh(box) # xywh # box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner n_raw_dets = pred.shape[0] # print('\nProcessing eval_file {}'.format(eval_file)) new_trackers = {} bboxes = [] # print('n_raw_dets: {}'.format(n_raw_dets)) for di, d in enumerate(pred): xmin, ymin, xmax, ymax = [float(x) for x in box[di]] label_id = int(d[6]) label = names[label_id] confidence = float(d[4]) bbox = [xmin, ymin, xmax, ymax, label, confidence, -1] # associated_bboxes = {} # bboxes_to_initialize = {} if opt.tracker_type: _id = associate(trackers, [xmin, ymin, xmax, ymax], label, opt.assoc_thresh) if _id < 0: # unassociated detection if prev_tracker_frame_id == -1 or seq_frame_id - prev_tracker_frame_id >= opt.track_diff: # min frame difference between tracker creation max_target_id += 1 new_tracker = create_tracker(target_id=max_target_id, label=label, confidence=confidence) w = xmax - xmin h = ymax - ymin cx = xmin + w / 2.0 cy = ymin + h / 2.0 # bboxes_to_initialize[max_target_id] = [cx, cy, w, h] new_trackers[max_target_id] = (new_tracker, [cx, cy, w, h]) if opt.filter_unassociated: bbox = [xmin, ymin, xmax, ymax, label, confidence, new_tracker.target_id] # associated_bboxes[new_tracker.target_id] = bbox else: _det_bbox = [xmin, ymin, xmax, ymax] # _tracker_bbox = trackers[_id].bbox # mean_bbox = [(_tracker_bbox[i] + _det_bbox[i]) / 2.0 for i in range(4)] # mean_confidence = (trackers[_id].confidence + confidence) / 2.0 mean_bbox = _det_bbox mean_confidence = confidence bbox = mean_bbox + [label, mean_confidence, 0] # associated_bboxes[trackers[_id].target_id] = bbox bboxes.append(bbox) n_det_bboxes = len(bboxes) removed_target_ids = [] if opt.tracker_type: # trackers_to_remove = [] for i, tracker in enumerate(trackers): if tracker.associated: tracker.associated_frames += 1 tracker.associated = 0 tracker.unassociated_frames = 0 continue tracker.unassociated_frames += 1 if opt.unassoc_thresh and 
tracker.unassociated_frames > opt.unassoc_thresh: if opt.verbose: print('Removing tracker {} with unassociated_frames: {}'.format( tracker.target_id, tracker.unassociated_frames)) # trackers_to_remove.append(tracker) removed_target_ids.append(tracker.target_id) continue bboxes.append(tracker.bbox + [tracker.label, tracker.cumulative_confidence, tracker.target_id]) tracker.associated = 0 # remove trackers that have gone unassociated for too long n_removed_trackers = len(removed_target_ids) # for tracker in trackers_to_remove: # print('Removing tracker {} with unassociated_frames: {}'.format( # tracker.target_id, tracker.unassociated_frames)) trackers = [tracker for tracker in trackers if tracker.target_id not in removed_target_ids] if new_trackers: prev_tracker_frame_id = seq_frame_id trackers += [new_tracker[0] for _, new_tracker
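# The test loop above calls associate(trackers, box, label, assoc_thresh) to
# match a raw detection to an existing tracker.  The repository's actual
# implementation is not shown in this excerpt; the sketch below is one plausible
# greedy IoU matcher, written only to illustrate the idea.  It assumes each
# tracker exposes .bbox as [xmin, ymin, xmax, ymax], .label and .associated,
# which matches how those fields are used in the surrounding code.
def iou(box_a, box_b):
    """Intersection-over-union of two [xmin, ymin, xmax, ymax] boxes."""
    ix1, iy1 = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    ix2, iy2 = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_a + area_b - inter
    return inter / union if union > 0 else 0.0


def associate_sketch(trackers, det_box, det_label, assoc_thresh):
    """Return the index of the best-matching free tracker, or -1 if none."""
    best_idx, best_iou = -1, assoc_thresh
    for idx, tracker in enumerate(trackers):
        if tracker.label != det_label or tracker.associated:
            continue
        overlap = iou(tracker.bbox, det_box)
        if overlap >= best_iou:
            best_idx, best_iou = idx, overlap
    if best_idx >= 0:
        trackers[best_idx].associated = 1
    return best_idx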
preview_response.json(), dict( success=True, previewTable=[ dict( name=self.img1.metadata.name, link=reverse( 'annotation_tool', kwargs=dict(image_id=self.img1.pk)), createInfo="Will create 2 points, 2 annotations", deleteInfo="Will delete 2 existing annotations", ), dict( name=self.img2.metadata.name, link=reverse( 'annotation_tool', kwargs=dict(image_id=self.img2.pk)), createInfo="Will create 2 points, 0 annotations", deleteInfo="Will delete 1 existing annotations", ), dict( name=self.img3.metadata.name, link=reverse( 'annotation_tool', kwargs=dict(image_id=self.img3.pk)), createInfo="Will create 2 points, 2 annotations", ), ], previewDetails=dict( numImages=3, totalPoints=6, totalAnnotations=4, numImagesWithExistingAnnotations=2, ), ), ) self.assertDictEqual(upload_response.json(), dict(success=True)) values_set = set( Point.objects.filter( image__in=[self.img1, self.img2, self.img3]) .values_list('column', 'row', 'point_number', 'image_id')) self.assertSetEqual(values_set, { (10, 10, 1, self.img1.pk), (20, 20, 2, self.img1.pk), (30, 30, 1, self.img2.pk), (40, 40, 2, self.img2.pk), (50, 50, 1, self.img3.pk), (60, 60, 2, self.img3.pk), }) annotations = Annotation.objects.filter( image__in=[self.img1, self.img2, self.img3]) values_set = set( (a.label_code, a.point.pk, a.image.pk) for a in annotations ) self.assertSetEqual(values_set, { ('A', Point.objects.get( point_number=1, image=self.img1).pk, self.img1.pk), ('A', Point.objects.get( point_number=2, image=self.img1).pk, self.img1.pk), ('A', Point.objects.get( point_number=1, image=self.img3).pk, self.img3.pk), ('B', Point.objects.get( point_number=2, image=self.img3).pk, self.img3.pk), }) self.img1.annoinfo.refresh_from_db() self.assertIsNotNone(self.img1.annoinfo.last_annotation) self.img2.annoinfo.refresh_from_db() self.assertIsNone(self.img2.annoinfo.last_annotation) self.img3.annoinfo.refresh_from_db() self.assertIsNotNone(self.img3.annoinfo.last_annotation) def test_label_codes_different_case_csv(self): """ The import file's label codes can use different upper/lower case and still be matched to the corresponding labelset label codes. """ # Make a longer-than-1-char label code so we can test that # lower() is being used on both the label's code and the CSV value labels = self.create_labels(self.user, ['Abc'], 'Group1') self.create_labelset(self.user, self.source, labels) rows = [ ['Name', 'Column', 'Row', 'Label'], ['1.png', 60, 40, 'aBc'], ] csv_file = self.make_csv_file('A.csv', rows) preview_response = self.preview_csv_annotations( self.user, self.source, csv_file) upload_response = self.upload_annotations(self.user, self.source) self.check_label_codes_different_case( preview_response, upload_response) def test_label_codes_different_case_cpc(self): """ The import file's label codes can use different upper/lower case and still be matched to the corresponding labelset label codes. 
""" # Make a longer-than-1-char label code so we can test that # lower() is being used on both the label's code and the CSV value labels = self.create_labels(self.user, ['Abc'], 'Group1') self.create_labelset(self.user, self.source, labels) cpc_files = [ self.make_cpc_file( self.image_dimensions, '1.cpc', r"C:\My Photos\2017-05-13 GBR\1.png", [ (60*15, 40*15, 'aBc')]), ] preview_response = self.preview_cpc_annotations( self.user, self.source, cpc_files) upload_response = self.upload_annotations(self.user, self.source) self.check_label_codes_different_case( preview_response, upload_response) def check_label_codes_different_case( self, preview_response, upload_response): self.assertDictEqual( preview_response.json(), dict( success=True, previewTable=[ dict( name=self.img1.metadata.name, link=reverse( 'annotation_tool', kwargs=dict(image_id=self.img1.pk)), createInfo="Will create 1 points, 1 annotations", ), ], previewDetails=dict( numImages=1, totalPoints=1, totalAnnotations=1, numImagesWithExistingAnnotations=0, ), ), ) self.assertDictEqual(upload_response.json(), dict(success=True)) values_set = set( Point.objects.filter(image__in=[self.img1]) .values_list('column', 'row', 'point_number', 'image_id')) self.assertSetEqual(values_set, { (60, 40, 1, self.img1.pk), }) annotations = Annotation.objects.filter(image__in=[self.img1]) values_set = set( (a.label_code, a.point.pk, a.image.pk) for a in annotations ) self.assertSetEqual(values_set, { ('Abc', Point.objects.get( point_number=1, image=self.img1).pk, self.img1.pk), }) def test_skipped_filenames_csv(self): """ The CSV can have filenames that we don't recognize. Those rows will just be ignored. """ rows = [ ['Name', 'Column', 'Row', 'Label'], ['1.png', 50, 50, 'A'], ['4.png', 60, 40, 'B'], ] csv_file = self.make_csv_file('A.csv', rows) preview_response = self.preview_csv_annotations( self.user, self.source, csv_file) upload_response = self.upload_annotations(self.user, self.source) self.check_skipped_filenames(preview_response, upload_response) def test_skipped_filenames_cpc(self): """ There can be CPCs corresponding to filenames that we don't recognize. Those CPCs will just be ignored. """ cpc_files = [ self.make_cpc_file( self.image_dimensions, '1.cpc', r"C:\My Photos\2017-05-13 GBR\1.png", [ (50*15, 50*15, 'A')]), # image parameter is just for getting image dimensions. # This cpc should be skipped anyway, so we don't care which image # we pass in. 
self.make_cpc_file( self.image_dimensions, '4.cpc', r"C:\My Photos\2017-05-13 GBR\4.png", [ (60*15, 40*15, 'B')]), ] preview_response = self.preview_cpc_annotations( self.user, self.source, cpc_files) upload_response = self.upload_annotations(self.user, self.source) self.check_skipped_filenames(preview_response, upload_response) def check_skipped_filenames(self, preview_response, upload_response): self.assertDictEqual( preview_response.json(), dict( success=True, previewTable=[ dict( name=self.img1.metadata.name, link=reverse( 'annotation_tool', kwargs=dict(image_id=self.img1.pk)), createInfo="Will create 1 points, 1 annotations", ), ], previewDetails=dict( numImages=1, totalPoints=1, totalAnnotations=1, numImagesWithExistingAnnotations=0, ), ), ) self.assertDictEqual(upload_response.json(), dict(success=True)) values_set = set( Point.objects.filter(image__in=[self.img1]) .values_list('column', 'row', 'point_number', 'image_id')) self.assertSetEqual(values_set, { (50, 50, 1, self.img1.pk), }) annotations = Annotation.objects.filter(image__in=[self.img1]) values_set = set( (a.label_code, a.point.pk, a.image.pk) for a in annotations ) self.assertSetEqual(values_set, { ('A', Point.objects.get( point_number=1, image=self.img1).pk, self.img1.pk), }) class UploadAnnotationsMultipleSourcesTest(UploadAnnotationsBaseTest): """ Test involving multiple sources. """ @classmethod def setUpTestData(cls): super().setUpTestData() cls.user = cls.create_user() cls.source = cls.create_source(cls.user) cls.source2 = cls.create_source(cls.user) labels = cls.create_labels(cls.user, ['A', 'B'], 'Group1') cls.create_labelset(cls.user, cls.source, labels) cls.create_labelset(cls.user, cls.source2, labels) cls.img1_s1 = cls.upload_image( cls.user, cls.source, image_options=dict(filename='1.png', width=100, height=100)) cls.img1_s2 = cls.upload_image( cls.user, cls.source2, image_options=dict(filename='1.png', width=100, height=100)) cls.img2_s2 = cls.upload_image( cls.user, cls.source2, image_options=dict(filename='2.png', width=100, height=100)) cls.image_dimensions = (100, 100) def test_other_sources_unaffected_csv(self): """ We shouldn't touch images of other sources which happen to have the same image names. """ # Upload to source 2 rows = [ ['Name', 'Column', 'Row', 'Label'], ['1.png', 10, 10, 'B'], ['1.png', 20, 20, 'B'], ['2.png', 15, 15, 'A'], ['2.png', 25, 25, 'A'], ] csv_file = self.make_csv_file('A.csv', rows) self.preview_csv_annotations(self.user, self.source2, csv_file) self.upload_annotations(self.user, self.source2) # Upload to source 1 rows = [ ['Name', 'Column', 'Row', 'Label'], ['1.png', 50, 50, 'A'], # This image doesn't exist in source 1 ['2.png', 60, 40, 'B'], ] csv_file = self.make_csv_file('B.csv', rows) preview_response = self.preview_csv_annotations( self.user, self.source, csv_file) upload_response = self.upload_annotations(self.user, self.source) self.check_other_sources_unaffected(preview_response, upload_response) def test_other_sources_unaffected_cpc(self): """ We shouldn't touch images of other sources which happen to have the same image names. 
""" # Upload to source 2 cpc_files = [ self.make_cpc_file( self.image_dimensions, '1.cpc', r"C:\My Photos\2017-05-13 GBR\1.png", [ (10*15, 10*15, 'B'), (20*15, 20*15, 'B')]), self.make_cpc_file( self.image_dimensions, '2.cpc', r"C:\My Photos\2017-05-13 GBR\2.png", [ (15*15, 15*15, 'A'), (25*15, 25*15, 'A')]), ] self.preview_cpc_annotations(self.user, self.source2, cpc_files) self.upload_annotations(self.user, self.source2) # Upload to source 1 cpc_files = [ self.make_cpc_file( self.image_dimensions, '1.cpc', r"C:\My Photos\2017-05-13 GBR\1.png", [ (50*15, 50*15, 'A')]), # This image doesn't exist in source 1 self.make_cpc_file( self.image_dimensions, '2.cpc', r"C:\My Photos\2017-05-13 GBR\2.png", [ (60*15, 40*15, 'B')]), ] preview_response = self.preview_cpc_annotations( self.user, self.source, cpc_files) upload_response = self.upload_annotations(self.user, self.source) self.check_other_sources_unaffected(preview_response, upload_response) def check_other_sources_unaffected( self, preview_response, upload_response): # Check source 1 responses self.assertDictEqual( preview_response.json(), dict( success=True, previewTable=[ dict( name=self.img1_s1.metadata.name, link=reverse( 'annotation_tool', kwargs=dict(image_id=self.img1_s1.pk)), createInfo="Will create 1 points, 1 annotations", ), ], previewDetails=dict( numImages=1, totalPoints=1, totalAnnotations=1, numImagesWithExistingAnnotations=0, ), ), ) self.assertDictEqual(upload_response.json(), dict(success=True)) # Check source 1 objects values_set = set( Point.objects.filter(image__in=[self.img1_s1]) .values_list('column', 'row', 'point_number', 'image_id')) self.assertSetEqual(values_set, { (50, 50, 1, self.img1_s1.pk), }) annotations = Annotation.objects.filter(image__in=[self.img1_s1]) values_set = set( (a.label_code, a.point.pk, a.image.pk) for a in annotations ) self.assertSetEqual(values_set, { ('A', Point.objects.get( point_number=1, image=self.img1_s1).pk, self.img1_s1.pk), }) # Check source 2 objects values_set = set( Point.objects.filter(image__in=[self.img1_s2, self.img2_s2]) .values_list('column', 'row', 'point_number', 'image_id')) self.assertSetEqual(values_set, { (10, 10, 1, self.img1_s2.pk), (20, 20, 2, self.img1_s2.pk), (15, 15, 1, self.img2_s2.pk), (25, 25, 2, self.img2_s2.pk), }) annotations = Annotation.objects.filter( image__in=[self.img1_s2, self.img2_s2]) values_set = set( (a.label_code, a.point.pk, a.image.pk) for a in annotations ) self.assertSetEqual(values_set, { ('B', Point.objects.get( point_number=1, image=self.img1_s2).pk, self.img1_s2.pk), ('B', Point.objects.get( point_number=2, image=self.img1_s2).pk, self.img1_s2.pk), ('A', Point.objects.get( point_number=1, image=self.img2_s2).pk, self.img2_s2.pk), ('A', Point.objects.get( point_number=2, image=self.img2_s2).pk, self.img2_s2.pk), }) class UploadAnnotationsContentsTest(UploadAnnotationsBaseTest): """ Annotation upload edge cases and error cases related to contents. 
""" @classmethod def setUpTestData(cls): super().setUpTestData() cls.user = cls.create_user() cls.source = cls.create_source(cls.user) # Labels in labelset labels = cls.create_labels(cls.user, ['A', 'B'], 'Group1') cls.create_labelset(cls.user, cls.source, labels) # Label not in labelset cls.create_labels(cls.user, ['C'], 'Group1') cls.img1 = cls.upload_image( cls.user, cls.source, image_options=dict(filename='1.png', width=200, height=100)) cls.img2 = cls.upload_image( cls.user, cls.source, image_options=dict(filename='2.png', width=100, height=200)) cls.image_dimensions_1 = (200, 100) cls.image_dimensions_2 = (100, 200) def do_success_csv(self, point_data, expected_points_set): rows = [['1.png']+list(p) for p in point_data] if len(rows[0]) == 3: header_row = ['Name', 'Column', 'Row'] else: header_row = ['Name', 'Column', 'Row', 'Label'] csv_file = self.make_csv_file('A.csv', [header_row] + rows) self.preview_csv_annotations(self.user, self.source, csv_file) self.upload_annotations(self.user, self.source) values_set = set( Point.objects.filter(image__in=[self.img1]) .values_list('column', 'row', 'point_number', 'image_id')) self.assertSetEqual(values_set, expected_points_set) def do_error_csv(self, point_data, expected_error): rows = [['1.png']+list(p) for p in point_data] if len(rows[0]) == 3: header_row = ['Name', 'Column', 'Row'] else: header_row = ['Name', 'Column', 'Row', 'Label'] csv_file = self.make_csv_file('A.csv', [header_row] + rows) preview_response = self.preview_csv_annotations( self.user, self.source, csv_file) self.assertDictEqual( preview_response.json(), dict(error=expected_error)) def do_success_cpc(self, point_data, expected_points_set): if len(point_data[0]) == 2: # point_data elements have (column, row). Add a blank label code. point_data = [p+('',) for p in point_data] cpc_files = [ self.make_cpc_file( self.image_dimensions_1, '1.cpc', r"C:\My Photos\2017-05-13 GBR\1.png", point_data)] self.preview_cpc_annotations(self.user, self.source, cpc_files) self.upload_annotations(self.user, self.source) values_set = set( Point.objects.filter(image__in=[self.img1]) .values_list('column', 'row', 'point_number', 'image_id')) self.assertSetEqual(values_set, expected_points_set) def do_error_cpc(self, point_data, expected_error): if len(point_data[0]) == 2: point_data = [p+('',) for p in point_data] cpc_files = [ self.make_cpc_file( self.image_dimensions_1, '1.cpc', r"C:\My Photos\2017-05-13 GBR\1.png", point_data)] preview_response = self.preview_cpc_annotations( self.user, self.source, cpc_files) self.assertDictEqual( preview_response.json(), dict(error=expected_error)) def test_row_not_number_csv(self): """A row/col which can't be parsed as a number should result in an appropriate error message.""" self.do_error_csv( [(50, 'abc')], "For image 1.png, point 1:" "
<gh_stars>1-10 #!/usr/bin/env python # Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Full AlphaFold protein structure prediction script.""" import json import os import pathlib import pickle import random import sys import time from typing import Dict, Union, Optional, List import jax from absl import app from absl import flags from absl import logging import numpy as np from alphafold.common import protein from alphafold.common import residue_constants from alphafold.data import pipeline from alphafold.data import pipeline_multimer from alphafold.data import templates from alphafold.data.tools import hhsearch from alphafold.data.tools import hmmsearch from alphafold.model import config from alphafold.model import model from alphafold.model import data from alphafold.relax import relax # Internal import (7716). import libconfig_af # the main input arguments flags.DEFINE_string( 'fasta_path', None, 'Paths to a FASTA file, If the FASTA file contains ' 'multiple sequences, then it will be folded as a multimer. ') flags.DEFINE_boolean( 'is_prokaryote', None, 'Optional for multimer system, not used by the single chain system. ' 'specifying true where the target complex is from a prokaryote, and false ' 'where it is not, or where the origin is unknown. 
These values determine ' 'the pairing method for the MSA.') flags.DEFINE_string('data_dir', libconfig_af.data_dir, 'Path to directory of supporting data.') flags.DEFINE_string('output_dir', os.getcwd(), 'Path to a directory that will store the results.') # paths to executables flags.DEFINE_string('jackhmmer_binary_path', libconfig_af.jackhmmer_binary_path, 'Path to the JackHMMER executable.') flags.DEFINE_string('hhblits_binary_path', libconfig_af.hhblits_binary_path, 'Path to the HHblits executable.') flags.DEFINE_string('hhsearch_binary_path', libconfig_af.hhsearch_binary_path, 'Path to the HHsearch executable.') flags.DEFINE_string('hmmsearch_binary_path', libconfig_af.hmmsearch_binary_path, 'Path to the hmmsearch executable.') flags.DEFINE_string('hmmbuild_binary_path', libconfig_af.hmmbuild_binary_path, 'Path to the hmmbuild executable.') flags.DEFINE_string('kalign_binary_path', libconfig_af.kalign_binary_path, 'Path to the Kalign executable.') # paths to databases flags.DEFINE_string('uniref90_database_path', libconfig_af.uniref90_database_path, 'Path to the Uniref90 database for use by JackHMMER.') flags.DEFINE_string('mgnify_database_path', libconfig_af.mgnify_database_path, 'Path to the MGnify database for use by JackHMMER.') flags.DEFINE_string('bfd_database_path', libconfig_af.bfd_database_path, 'Path to the BFD database for use by HHblits.') flags.DEFINE_string('small_bfd_database_path', libconfig_af.small_bfd_database_path, 'Path to the small version of BFD used with the "reduced_dbs" preset.') flags.DEFINE_string('uniclust30_database_path', libconfig_af.uniclust30_database_path, 'Path to the Uniclust30 database for use by HHblits.') flags.DEFINE_string('uniprot_database_path', libconfig_af.uniprot_database_path, 'Path to the Uniprot database for use by JackHMMer.') flags.DEFINE_string('pdb70_database_path', libconfig_af.pdb70_database_path, 'Path to the PDB70 database for use by HHsearch.') flags.DEFINE_string('pdb_seqres_database_path', libconfig_af.pdb_seqres_database_path, 'Path to the PDB seqres database for use by hmmsearch.') flags.DEFINE_string('template_mmcif_dir', libconfig_af.template_mmcif_dir, 'Path to a directory with template mmCIF structures, each named <pdb_id>.cif') flags.DEFINE_string('max_template_date', libconfig_af.max_template_date, 'Maximum template release date to consider. ' 'Important if folding historical test sets.') flags.DEFINE_string('obsolete_pdbs_path', libconfig_af.obsolete_pdbs_path, 'Path to file containing a mapping from obsolete PDB IDs to the PDB IDs' 'of their replacements.') # presets flags.DEFINE_enum('db_preset', 'full_dbs', ['full_dbs', 'reduced_dbs'], 'Choose preset MSA database configuration - ' 'smaller genetic database config (reduced_dbs) or ' 'full genetic database config (full_dbs)') flags.DEFINE_enum('model_preset', 'monomer', ['monomer', 'monomer_casp14', 'monomer_ptm', 'multimer'], 'Choose preset model configuration - the monomer model, ' 'the monomer model with extra ensembling, monomer model with ' 'pTM head, or multimer model') flags.DEFINE_integer('random_seed', None, 'The random seed for the data ' 'pipeline. By default, this is randomly generated. Note ' 'that even if this is set, Alphafold may still not be ' 'deterministic, because processes like GPU inference are ' 'nondeterministic.') flags.DEFINE_boolean('use_precomputed_msas', True, 'Whether to read MSAs that ' 'have been written to disk. 
WARNING: This will not check ' 'if the sequence, database or configuration have changed.') # custom arguments flags.DEFINE_integer("cpu", 8, 'Number of processors for sequence searches') flags.DEFINE_boolean('jit', True, 'compile using jax.jit') flags.DEFINE_float("max_sequence_identity", -1., "Maximum sequence identity for template prefilter") flags.DEFINE_boolean("use_relax", True, "Whether to use AMBER local energy minimization") flags.DEFINE_boolean("use_templates", True, "Whether to use PDB database") flags.DEFINE_boolean("use_msa", True, "Whether to use MSA") flags.DEFINE_boolean("remove_msa_for_template_aligned", False, \ 'Remove MSA information for template aligned region') flags.DEFINE_integer("max_msa_clusters", None, 'Number of maximum MSA clusters') flags.DEFINE_integer("max_extra_msa", None, 'Number of extra sequences') flags.DEFINE_list("model_names", None, "Model configs to be run") flags.DEFINE_list("msa_path", None, "User input MSA") flags.DEFINE_list("pdb_path", None, "User input structure") flags.DEFINE_boolean("multimer", False, "Whether to use the multimer modeling hack") flags.DEFINE_integer("num_recycle", 3, "The number of recycling") flags.DEFINE_boolean("feature_only", False, "Whether to generate features.pkl only") FLAGS = flags.FLAGS MAX_TEMPLATE_HITS = 20 RELAX_MAX_ITERATIONS = 0 RELAX_ENERGY_TOLERANCE = 2.39 RELAX_STIFFNESS = 10.0 RELAX_EXCLUDE_RESIDUES = [] RELAX_MAX_OUTER_ITERATIONS = 3 def _check_flag(flag_name: str, other_flag_name: str, should_be_set: bool): if should_be_set != bool(FLAGS[flag_name].value): verb = 'be' if should_be_set else 'not be' raise ValueError(f'{flag_name} must {verb} set when running with ' f'"--{other_flag_name}={FLAGS[other_flag_name].value}".') def remove_msa_for_template_aligned_regions(feature_dict): if 'template_all_atom_masks' in feature_dict: mask = feature_dict['template_all_atom_masks'] elif 'template_all_atom_mask' in feature_dict: mask = feature_dict['template_all_atom_mask'] mask = (mask.sum(axis=(0,2)) > 0) # # need to check further for multimer_mode if 'deletion_matrix_int' in feature_dict: feature_dict['deletion_matrix_int'][:,mask] = 0 else: feature_dict['deletion_matrix'][:,mask] = 0 feature_dict['msa'][:,mask] = 21 return feature_dict def retrieve_custom_features(processed_feature_dict, feature_dict): for name in ['for_pdb_record']: if name in feature_dict: processed_feature_dict[name] = feature_dict[name] def predict_structure( fasta_path: str, fasta_name: str, msa_path: Union[str, List[str]], pdb_path: Union[str, List[str]], output_dir_base: str, data_pipeline: Union[pipeline.DataPipeline, pipeline_multimer.DataPipeline], model_runners: Dict[str, model.RunModel], amber_relaxer: relax.AmberRelaxation, remove_msa_for_template_aligned: bool, feature_only: bool, random_seed: int, is_prokaryote: Optional[bool] = None): """Predicts structure using AlphaFold for the given sequence.""" logging.info('Predicting %s', fasta_name) timings = {} output_dir = os.path.join(output_dir_base, fasta_name) if not os.path.exists(output_dir): os.makedirs(output_dir) msa_output_dir = os.path.join(output_dir, 'msas') if not os.path.exists(msa_output_dir): os.makedirs(msa_output_dir) # Get features. # modified to re-use features.pkl file, if it exists. 
t_0 = time.time() features_output_path = os.path.join(output_dir, 'features.pkl') if os.path.exists(features_output_path): with open(features_output_path, 'rb') as f: feature_dict = pickle.load(f) else: if is_prokaryote is None: feature_dict = data_pipeline.process( input_fasta_path=fasta_path, input_msa_path=msa_path, input_pdb_path=pdb_path, msa_output_dir=msa_output_dir) else: feature_dict = data_pipeline.process( input_fasta_path=fasta_path, input_msa_path=msa_path, input_pdb_path=pdb_path, msa_output_dir=msa_output_dir, is_prokaryote=is_prokaryote) # Write out features as a pickled dictionary. with open(features_output_path, 'wb') as f: pickle.dump(feature_dict, f, protocol=4) # apply the "remove_msa_for_template_aligned_regions" protocol if remove_msa_for_template_aligned: feature_dict = remove_msa_for_template_aligned_regions(feature_dict) timings['features'] = time.time() - t_0 if feature_only: return unrelaxed_pdbs = {} relaxed_pdbs = {} ranking_confidences = {} # Run the models. num_models = len(model_runners) for model_index, (model_name, model_runner) in enumerate(model_runners.items()): unrelaxed_pdb_path = os.path.join(output_dir, f'unrelaxed_{model_name}.pdb') relaxed_output_path = os.path.join(output_dir, f'relaxed_{model_name}.pdb') if amber_relaxer: final_output_path = relaxed_output_path else: final_output_path = unrelaxed_pdb_path result_output_path = os.path.join(output_dir, f'result_{model_name}.pkl') if os.path.exists(final_output_path) and os.path.exists(result_output_path): # skip running this model and re-use pre-existing results. with open(result_output_path, 'rb') as fp: prediction_result = pickle.load(fp) ranking_confidences[model_name] = prediction_result['ranking_confidence'] with open(final_output_path) as fp: pdb_str = fp.read() if amber_relaxer: relaxed_pdbs[model_name] = pdb_str else: unrelaxed_pdbs[model_name] = pdb_str continue # logging.info('Running model %s on %s', model_name, fasta_name) t_0 = time.time() model_random_seed = model_index + random_seed * num_models processed_feature_dict = model_runner.process_features( feature_dict, random_seed=model_random_seed) timings[f'process_features_{model_name}'] = time.time() - t_0 #processed_feat_path = os.path.join(output_dir, f"features_{model_name}.pkl") #with open(processed_feat_path, 'wb') as f: # pickle.dump(processed_feature_dict, f, protocol=4) t_0 = time.time() prediction_result = model_runner.predict(processed_feature_dict, random_seed=model_random_seed) t_diff = time.time() - t_0 timings[f'predict_benchmark_{model_name}'] = t_diff logging.info( 'Total JAX model %s on %s predict time: %.1fs', model_name, fasta_name, t_diff) plddt = prediction_result['plddt'] ranking_confidences[model_name] = prediction_result['ranking_confidence'] # Save the model outputs. with open(result_output_path, 'wb') as f: pickle.dump(prediction_result, f, protocol=4) # retrieve custom features for outputs retrieve_custom_features(processed_feature_dict, feature_dict) # Add the predicted LDDT in the b-factor column. # Note that higher predicted LDDT value means higher model confidence. 
plddt_b_factors = np.repeat( plddt[:, None], residue_constants.atom_type_num, axis=-1) unrelaxed_protein = protein.from_prediction( features=processed_feature_dict, result=prediction_result, b_factors=plddt_b_factors, remove_leading_feature_dimension=not model_runner.multimer_mode) unrelaxed_pdbs[model_name] = protein.to_pdb(unrelaxed_protein) with open(unrelaxed_pdb_path, 'w') as f: f.write(unrelaxed_pdbs[model_name]) # Relax the prediction. if amber_relaxer: t_0 = time.time() relaxed_pdb_str, _, _ = amber_relaxer.process(prot=unrelaxed_protein) timings[f'relax_{model_name}'] = time.time() - t_0 relaxed_pdbs[model_name] = relaxed_pdb_str # Save the relaxed PDB. with open(relaxed_output_path, 'w') as f: f.write(relaxed_pdb_str) # Rank by model confidence and write out relaxed PDBs in rank order. ranked_order = [] for idx, (model_name, _) in enumerate( sorted(ranking_confidences.items(), key=lambda x: x[1], reverse=True)): ranked_order.append(model_name) ranked_output_path = os.path.join(output_dir, f'ranked_{idx}.pdb') with open(ranked_output_path, 'w') as f: if amber_relaxer: f.write(relaxed_pdbs[model_name]) else: f.write(unrelaxed_pdbs[model_name]) ranking_output_path = os.path.join(output_dir, 'ranking_debug.json') with open(ranking_output_path, 'w') as f: label = 'iptm+ptm' if 'iptm' in prediction_result else 'plddts' f.write(json.dumps( {label: ranking_confidences, 'order': ranked_order}, indent=4)) logging.info('Final timings for %s: %s', fasta_name, timings) timings_output_path = os.path.join(output_dir, 'timings.json') with open(timings_output_path, 'w') as f: f.write(json.dumps(timings, indent=4)) def main(argv): if len(argv) > 1: raise app.UsageError('Too many command-line arguments.') # # modified: disabling JIT compilation if not FLAGS.jit: jax.config.update("jax_disable_jit", True) # # CHECK databases and executables for tool_name in ( 'jackhmmer', 'hhblits', 'hhsearch', 'hmmsearch', 'hmmbuild', 'kalign'): if not FLAGS[f'{tool_name}_binary_path'].value: raise ValueError(f'Could not find path to the "{tool_name}" binary. ' 'Make sure it is installed on your system.') # use_small_bfd
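# A small, self-contained illustration of the pLDDT-to-B-factor step in
# predict_structure() above: the per-residue confidence vector is broadcast
# across every atom slot so the written PDB carries pLDDT in the B-factor
# column.  The constant 37 stands in for residue_constants.atom_type_num
# (AlphaFold's atom37 layout); the random pLDDT values are dummy data used
# only for this demonstration.
import numpy as np

ATOM_TYPE_NUM = 37          # residue_constants.atom_type_num
num_residues = 5
plddt = np.random.uniform(50.0, 95.0, size=(num_residues,))

# Same broadcast as in predict_structure(): shape (N,) -> (N, 37)
plddt_b_factors = np.repeat(plddt[:, None], ATOM_TYPE_NUM, axis=-1)
assert plddt_b_factors.shape == (num_residues, ATOM_TYPE_NUM)
print(plddt_b_factors[0, :3])   # every atom of a residue shares its pLDDT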
# Copyright 2020 The KNIX Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import time import requests import random MAP_AVAILABLE_FRONTENDS = "available_triggers_frontned_map" MAP_TRIGGERS_TO_INFO = "triggers_to_info_map" SET_PENDING_TRIGGERS = "pending_triggers_set" def handle(value, context): assert isinstance(value, dict) data = value action = data["action"].lower() frontend_ip_port = data["self_ip_port"] trigger_status_map = data["trigger_status_map"] trigger_error_map = data["trigger_error_map"] response = {} response_data = {} errmsg = "" if action == "start": handle_start(frontend_ip_port, trigger_status_map, trigger_error_map, context) success = True response_data["message"] = "Triggers Frontend registered with Management service." elif action == "status": handle_status(frontend_ip_port, trigger_status_map, trigger_error_map, context) success = True response_data["message"] = "Triggers Frontend updated successfully." elif action == "stop": handle_stop(frontend_ip_port, trigger_status_map, trigger_error_map, context) success = True response_data["message"] = "Triggers Frontend stopped successfully." else: success = False errmsg = "Unknown action: " + str(action) if success: response["status"] = "success" else: response["status"] = "failure" response_data["message"] = errmsg response["data"] = response_data return response def get_available_frontends(context): tf_hosts = context.getMapKeys(MAP_AVAILABLE_FRONTENDS, True) return tf_hosts def is_frontend_registered(context, frontend_ip_port): return context.containsMapKey(MAP_AVAILABLE_FRONTENDS, frontend_ip_port, True) def get_frontend_info(context, frontend_ip_port): ret = context.getMapEntry(MAP_AVAILABLE_FRONTENDS, frontend_ip_port, True) print("get_frontend_info: data: " + str(ret)) if ret is "" or ret is None: return None else: return json.loads(ret) def remove_frontend_info(context, frontend_ip_port): print("remove_frontend_info: " + frontend_ip_port) context.deleteMapEntry(MAP_AVAILABLE_FRONTENDS, frontend_ip_port, True) def add_frontend_info(context, frontend_ip_port, entry): print("add_frontend_info: " + frontend_ip_port + ", data: " + entry) context.putMapEntry(MAP_AVAILABLE_FRONTENDS, frontend_ip_port, entry, True) def is_trigger_registered(context, trigger_id): return context.containsMapKey(MAP_TRIGGERS_TO_INFO, trigger_id, True) def get_trigger_info(context, trigger_id): ret = context.getMapEntry(MAP_TRIGGERS_TO_INFO, trigger_id, True) if ret is "" or ret is None: return None else: return json.loads(ret) def add_trigger_info(context, trigger_id, data): print("add_trigger_info: " + trigger_id + ", data: " + data) context.putMapEntry(MAP_TRIGGERS_TO_INFO, trigger_id, data, True) def remove_trigger_info(context, trigger_id): print("remove_trigger_info: " + trigger_id) context.deleteMapEntry(MAP_TRIGGERS_TO_INFO, trigger_id, True) def get_user_trigger_list(context, email): user_triggers_list = context.get(email + "_list_triggers", True) if user_triggers_list is not None and user_triggers_list != "": user_triggers_list = 
json.loads(user_triggers_list) else: user_triggers_list = {} return user_triggers_list def update_user_trigger_list(context, email, user_trigger_list): print("User: " + email + ", Trigger list updates to: " + str(user_trigger_list)) context.put(email + "_list_triggers", user_trigger_list, True) def add_to_global_pending_trigger_set(context, entry): print("add_to_global_pending_trigger_set: data: " + str(entry)) context.addSetEntry(SET_PENDING_TRIGGERS, entry, True) def remove_from_global_pending_trigger_set(context, entry): print("remove_from_global_pending_trigger_set: data: " + str(entry)) context.removeSetEntry(SET_PENDING_TRIGGERS, entry, True) def get_global_pending_trigger_set(context): items = [] items_ret = context.retrieveSet(SET_PENDING_TRIGGERS, True) if items_ret is not None: items = list(items_ret) print("get_global_pending_trigger_set: data: " + str(items)) else: print("get_global_pending_trigger_set: data: None") return items def clear_global_pending_trigger_set(context): context.clearSet(SET_PENDING_TRIGGERS, True) print("clear_global_pending_trigger_set") # called when a frontend starts def handle_start(frontend_ip_port, trigger_status_map, trigger_error_map, context): print("[TriggersFrontend] [START] frontend_ip_port: " + frontend_ip_port + ", trigger_status_map: " + str(trigger_status_map) + ", trigger_error_map: " + str(trigger_error_map)) assert(len(trigger_status_map) == 0) # frontend should not be running anything yet assert(len(trigger_error_map) == 0) # frontend should not be running anything yet frontend_available = is_frontend_registered(context, frontend_ip_port) triggers_to_recreate = [] triggers_to_inform_and_remove = [] if frontend_available: print("Frontend already registered, but it is reporting that it is starting!!") # we have the frontend already registered with us. Why is it starting again, # without telling us that it stopped? Maybe because the stop message did not reach us? 
# check if we have any triggers that we think should be active, # and were earlier assigned to this frontend which has just started up # such triggers will have to be re-assigned print("[handle_start] First removing information about the old frontend with same ip: " + frontend_ip_port) frontend_info = get_frontend_info(context, frontend_ip_port) remove_frontend_info(context, frontend_ip_port) for trigger_id in frontend_info: trigger_info = get_trigger_info(context, trigger_id) if trigger_info is not None and trigger_info["frontend_ip_port"] == frontend_ip_port: if trigger_info["status"].lower() == "ready": print("[handle_start] queuing trigger to be re-created, since status is ready: " + str(trigger_info)) triggers_to_recreate.append((trigger_info, "")) else: print("[handle_start] queuing trigger to be removed, since status is not ready: " + str(trigger_info)) triggers_to_inform_and_remove.append((trigger_info, "Associated Triggers Frontend not active")) else: # this trigger is now associated with a different frontend, simply remove information pass if len(triggers_to_inform_and_remove) > 0: inform_workflows_for_triggers(triggers_to_inform_and_remove, context) removeTriggerAndWorkflowAssociations(triggers_to_inform_and_remove, context, update_frontend_info=False) new_frontend_entry = {} add_frontend_info(context, frontend_ip_port, json.dumps(new_frontend_entry)) # pending_triggers_from_other_inactive_frontends = health_check_registered_frontends(context) # triggers_to_recreate = triggers_to_recreate + pending_triggers_from_other_inactive_frontends # pending_global_triggers = get_info_for_global_pending_triggers(context) # triggers_to_recreate = triggers_to_recreate + pending_global_triggers # recreate_pending_triggers(triggers_to_recreate, context) for (trigger_info, error_msg) in triggers_to_recreate: print("[handle_start] Queuing up to be recreated, trigger_id: " + trigger_info["trigger_id"] + ", trigger_info: " + str(trigger_info)) add_to_global_pending_trigger_set(context, trigger_info["trigger_id"]) def handle_status(frontend_ip_port, trigger_status_map, trigger_error_map, context): print("[TriggersFrontend] [STATUS], frontend_ip_port: " + frontend_ip_port + ", trigger_status_map: " + str(trigger_status_map) + ", trigger_error_map: " + str(trigger_error_map)) triggers_to_inform_and_remove = [] triggers_to_recreate = [] frontend_available = is_frontend_registered(context, frontend_ip_port) if frontend_available: # we know about this frontend frontend_info = get_frontend_info(context, frontend_ip_port) assert(frontend_info is not None) print("Known frontend with data: " + str(frontend_info)) # first check if any trigger has stopped unexpectedly, and check if we had this trigger registered with us # if so, then remove this trigger from our known list and put them in pending list for error_trigger_id in trigger_error_map: error_trigger_info = get_trigger_info(context, error_trigger_id) if error_trigger_id in frontend_info and error_trigger_info is not None: if error_trigger_info["status"].lower() == "ready": print("[handle_status] queuing trigger to be removed, since it stopped unexpectedly: " + str(error_trigger_info) + ", error message: " + str(trigger_error_map[error_trigger_id])) triggers_to_inform_and_remove.append((error_trigger_info, trigger_error_map[error_trigger_id])) if len(triggers_to_inform_and_remove) > 0: inform_workflows_for_triggers(triggers_to_inform_and_remove, context) removeTriggerAndWorkflowAssociations(triggers_to_inform_and_remove, context) else: # we don't know 
about this frontend. Ideally it should not have any triggers print("Unknown frontend sending a status update!!") new_frontend_entry = {} add_frontend_info(context, frontend_ip_port, json.dumps(new_frontend_entry)) pending_triggers_from_other_inactive_frontends = health_check_registered_frontends(context) triggers_to_recreate = triggers_to_recreate + pending_triggers_from_other_inactive_frontends pending_global_triggers = get_info_for_global_pending_triggers(context) triggers_to_recreate = triggers_to_recreate + pending_global_triggers recreate_pending_triggers(triggers_to_recreate, context) def handle_stop(frontend_ip_port, trigger_status_map, trigger_error_map, context): print("[TriggersFrontend] [STOP], frontend_ip_port: " + frontend_ip_port + ", trigger_status_map: " + str(trigger_status_map) + ", trigger_error_map: " + str(trigger_error_map)) assert(len(trigger_status_map) == 0) frontend_info = get_frontend_info(context, frontend_ip_port) assert(frontend_info is not None) remove_frontend_info(context, frontend_ip_port) triggers_to_recreate = [] triggers_to_inform_and_remove = [] for error_trigger_id in trigger_error_map: error_trigger_info = get_trigger_info(context, error_trigger_id) if error_trigger_id in frontend_info and error_trigger_info is not None: #if error_trigger_info["status"].lower() == "ready" and "ready trigger shutdown!" in trigger_error_map[error_trigger_id].lower(): if error_trigger_info["status"].lower() == "ready": triggers_to_recreate.append((error_trigger_info, trigger_error_map[error_trigger_id])) else: print("[handle_stop] queuing trigger to be removed, since status is not ready: " + str(error_trigger_info)) triggers_to_inform_and_remove.append((error_trigger_info, trigger_error_map[error_trigger_id])) if len(triggers_to_inform_and_remove) > 0: inform_workflows_for_triggers(triggers_to_inform_and_remove, context) removeTriggerAndWorkflowAssociations(triggers_to_inform_and_remove, context, update_frontend_info=False) #pending_triggers_from_other_inactive_frontends = health_check_registered_frontends(context) #triggers_to_recreate = triggers_to_recreate + pending_triggers_from_other_inactive_frontends #pending_global_triggers = get_info_for_global_pending_triggers(context) #triggers_to_recreate = triggers_to_recreate + pending_global_triggers #recreate_pending_triggers(triggers_to_recreate, context) for (trigger_info, error_msg) in triggers_to_recreate: print("[handle_stop] Queuing up to be recreated, trigger_id: " + trigger_info["trigger_id"] + ", trigger_info: " + str(trigger_info)) add_to_global_pending_trigger_set(context, trigger_info["trigger_id"]) def get_info_for_global_pending_triggers(context): global_pending_triggers = get_global_pending_trigger_set(context) clear_global_pending_trigger_set(context) triggers_to_recreate = [] for trigger_id in global_pending_triggers: pending_trigger_info = get_trigger_info(context, trigger_id) if pending_trigger_info is not None: print("[get_info_for_global_pending_triggers] Queuing trigger to be re-created: pending_trigger_info = " + str(pending_trigger_info)) triggers_to_recreate.append((pending_trigger_info, "")) return triggers_to_recreate def get_active_frontend(context): tf_hosts = get_available_frontends(context) if len(tf_hosts) == 0: print("No available TriggersFrontend found") return "" tf_hosts = list(tf_hosts) tf_ip_port = select_random_active_frontend(tf_hosts) if tf_ip_port is None or tf_ip_port is "": print("No active TriggersFrontend found") return "" return tf_ip_port def 
recreate_pending_triggers(triggers_to_recreate, context): print("[recreate_pending_triggers] called with number of triggers: " + str(len(triggers_to_recreate))) triggers_to_inform_and_remove = [] for (trigger_info, error_msg) in triggers_to_recreate: print("[recreate_pending_triggers] Attempting to recreate trigger_id: " + trigger_info["trigger_id"] + ", trigger_info: " + str(trigger_info)) active_frontend = get_active_frontend(context) if active_frontend is not "": # there is an active frontend available, try to re-create the trigger try: status, updated_info = attempt_to_recreate_single_trigger(trigger_info, active_frontend, context) if status: # trigger created, attempt to add workflow associations associated_workflows = updated_info["associated_workflows"].copy() print("[recreate_pending_triggers] Attempting to attach trigger to associated_workflows = " + str(associated_workflows)) for workflow_name in associated_workflows: association_status = attempt_to_associate_trigger_with_workflows(updated_info["trigger_id"], workflow_name, context) if association_status == False: del updated_info["associated_workflows"][workflow_name] add_trigger_info(context, updated_info["trigger_id"], json.dumps(updated_info)) print("[recreate_pending_triggers] Removed workflow: " + str(workflow_name) + ", from associated_workflows of trigger_info: " + str(updated_info)) else: # need to add this to the list of inform list and then remove it if updated_info is not None: print("[recreate_pending_triggers] Unable to recreate trigger. Queuing to be removed, trigger_id: " + updated_info["trigger_id"] + ", trigger_info: " + str(updated_info)) triggers_to_inform_and_remove.append((updated_info, "Unable to recreate trigger")) except Exception as e: print("[recreate_pending_triggers] Exception in
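# --- Hypothetical usage sketch (not part of the KNIX module above) ---
# Illustrates the payload shape that handle() expects from a Triggers Frontend;
# the field names come from handle() itself, the values are placeholders. An
# unrecognized "action" never touches the context/data layer, so context=None
# is safe for this particular call.
example_request = {
    "action": "heartbeat",
    "self_ip_port": "10.0.0.5:8080",
    "trigger_status_map": {},
    "trigger_error_map": {},
}
# handle(example_request, context=None)
# -> {"status": "failure", "data": {"message": "Unknown action: heartbeat"}}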
import requests import json from sseclient import SSEClient from threading import Thread import colorsys # Preset colours RED = (255, 0, 0) ORANGE = (255, 165, 0) YELLOW = (255, 255, 0) GREEN = (0, 255, 0) LIGHT_BLUE = (173, 216, 230) BLUE = (0, 0, 255) PINK = (255, 192, 203) PURPLE = (128, 0, 128) WHITE = (255, 255, 255) class Nanoleaf(): """The Nanoleaf class for controlling the Light Panels and Canvas :ivar ip: IP of the Nanoleaf device :ivar url: The base URL for requests :ivar auth_token: The authentication token for the API :ivar print_errors: True for errors to be shown, otherwise False """ def __init__(self, ip, auth_token=None, print_errors=False): """Initalises Nanoleaf class with desired arguments. :param ip: The IP address of the Nanoleaf device :param auth_token: Optional, include Nanoleaf authentication token here if generated, otherwise call generate_auth_token() after initlisation :param print_errors: Optional, True to show errors in the console :type ip: str :type auth_token: str :type print_errors: bool """ self.ip = ip self.url = "http://" + ip + ":16021/api/v1/" + str(auth_token) self.auth_token = auth_token self.print_errors = print_errors self.already_registered = False try: self.__check_connection() except: raise Exception("No valid Nanoleaf device found on IP: " + self.ip) def __error_check(self, code): """Checks and displays error messages Determines the request status code and prints the error, if print_errors is true. :param code: The error code :returns: Returns True if request was successful, otherwise False """ if self.print_errors: if code == 200 or code == 204: print("Action performed successfully.") return True elif code == 400: print("Error 400: Bad request.") elif code == 401: print("Error 401: Unauthorized, incorrect auth token. " + "Please generate a new one.") elif code == 403: print("Unauthorized, please hold the power button on the controller for 5-7 seconds, then try again.") elif code == 404: print("Error 404: Resource not found.") elif code == 500: print("Error 500: Internal server error.") return False else: if code == 200 or code == 204: return True else: return False def generate_auth_token(self): """Generates authentication token for device The power button on the device should be held for 5-7 seconds, then this method should be run. This will set both the auth_token and url instance variables. The authentication token printed in the console should be stored for use in own program and future instances of this class should initalised using this. :returns: True if successful, otherwise False """ url = "http://" + self.ip + ":16021/api/v1/new" r = requests.post(url) if r.status_code == 200: self.auth_token = json.loads(r.text)['auth_token'] print("Auth token successfully generated! Token: " + self.auth_token) self.url = "http://" + self.ip + ":16021/api/v1/" + str(self.auth_token) return True else: return self.__error_check(r.status_code) def delete_user(self, auth_token): """Deletes an authentication token Deletes an authentication token. This token can no longer be used as part of an API call to control the device. If required, generate a new one using generate_auth_token(). 
:param auth_token: The authentication token to delete :returns: True if successful, otherwise False """ url = "http://" + self.ip + ":16021/api/v1/" + str(auth_token) r = requests.delete(url) return self.__error_check(r.status_code) def __check_connection(self): """Ensures there is a valid connection""" requests.get(self.url, timeout=5) def get_panel_info(self): """Returns a dictionary of device information""" r = requests.get(self.url) return json.loads(r.text) ####################################################### #### POWER #### ####################################################### def power_off(self): """Powers off the lights :returns: True if successful, otherwise False """ data = {"on" : {"value": False}} r = requests.put(self.url + "/state", data=json.dumps(data)) return self.__error_check(r.status_code) def power_on(self): """Powers on the lights :returns: True if successful, otherwise False """ data = {"on" : {"value": True}} r = requests.put(self.url + "/state", data=json.dumps(data)) return self.__error_check(r.status_code) def get_power(self): """Returns the power status of the lights :returns: True if on, False if off """ r = requests.get(self.url + "/state/on") ans = json.loads(r.text) return ans['value'] def toggle_power(self): """Toggles the lights on/off""" if self.get_power(): return self.power_off() else: return self.power_on() ####################################################### #### COLOUR #### ####################################################### def set_color(self, rgb): """Sets the colour of the lights :param rgb: Tuple in the format (r, g, b) :returns: True if successful, otherwise False """ hsv_colour = colorsys.rgb_to_hsv(rgb[0]/255, rgb[1]/255, rgb[2]/255) hsv_colour = list(hsv_colour) hsv_colour[0] *= 360 hsv_colour[1] *= 100 hsv_colour[2] *= 100 final_colour = [ int(x) for x in hsv_colour ] data = { "hue" : {"value": final_colour[0]}, "sat": {"value": final_colour[1]}, "brightness": {"value": final_colour[2], "duration": 0} } r = requests.put(self.url + "/state", data=json.dumps(data)) return self.__error_check(r.status_code) ####################################################### #### ADJUST BRIGHTNESS #### ####################################################### def set_brightness(self, brightness, duration=0): """Sets the brightness of the lights :param brightness: The required brightness (between 0 and 100) :param duration: The duration over which to change the brightness :returns: True if successful, otherwise False """ if brightness > 100 or brightness < 0: raise ValueError('Brightness should be between 0 and 100') data = {"brightness" : {"value": brightness, "duration": duration}} r = requests.put(self.url + "/state", data=json.dumps(data)) return self.__error_check(r.status_code) def increment_brightness(self, brightness): """Increments the brightness of the lights :param brightness: How much to increment the brightness, can also be negative :returns: True if successful, otherwise False """ data = {"brightness" : {"increment": brightness}} r = requests.put(self.url + "/state", data = json.dumps(data)) return self.__error_check(r.status_code) def get_brightness(self): """Returns the current brightness value of the lights""" r = requests.get(self.url + "/state/brightness") ans = json.loads(r.text) return ans['value'] ####################################################### #### IDENTIFY #### ####################################################### def identify(self): """Runs the identify sequence on the lights :returns: True if successful, otherwise 
False """ r = requests.put(self.url + "/identify") return self.__error_check(r.status_code) ####################################################### #### HUE #### ####################################################### def set_hue(self, value): """Sets the hue of the lights :param value: The required hue (between 0 and 360) :returns: True if successful, otherwise False """ if value > 360 or value < 0: raise ValueError('Hue should be between 0 and 360') data = {"hue" : {"value" : value}} r = requests.put(self.url + "/state", data=json.dumps(data)) return self.__error_check(r.status_code) def increment_hue(self, value): """Increments the hue of the lights :param value: How much to increment the hue, can also be negative :returns: True if successful, otherwise False """ data = {"hue" : {"increment" : value}} r = requests.put(self.url + "/state", data=json.dumps(data)) return self.__error_check(r.status_code) def get_hue(self): """Returns the current hue value of the lights""" r = requests.get(self.url + "/state/hue") ans = json.loads(r.text) return ans['value'] ####################################################### #### SATURATION #### ####################################################### def set_saturation(self, value): """Sets the saturation of the lights :param value: The required saturation (between 0 and 100) :returns: True if successful, otherwise False """ if value > 100 or value < 0: raise ValueError('Saturation should be between 0 and 100') data = {"sat" : {"value" : value}} r = requests.put(self.url + "/state", data=json.dumps(data)) return self.__error_check(r.status_code) def increment_saturation(self, value): """Increments the saturation of the lights :param value: How much to increment the saturation, can also be negative. :returns: True if successful, otherwise False """ data = {"sat" : {"increment" : value}} r = requests.put(self.url + "/state", data=json.dumps(data)) return self.__error_check(r.status_code) def get_saturation(self): """Returns the current saturation value of the lights""" r = requests.get(self.url + "/state/sat") ans = json.loads(r.text) return ans['value'] ####################################################### #### COLOUR TEMPERATURE #### ####################################################### def set_color_temp(self, value): """Sets the white colour temperature of the lights :param value: The required colour temperature (between 1200 and 6500) :returns: True if successful, otherwise False """ if value > 6500 or value < 1200: raise ValueError('Colour temp should be between 1200 and 6500') data = {"ct" : {"value" : value}} r = requests.put(self.url + "/state", json.dumps(data)) return self.__error_check(r.status_code) def increment_color_temp(self, value): """Increments the white colour temperature of the lights :param value: How much to increment the colour temperature by, can also be negative.
:returns: True if successful, otherwise False """ data = {"ct" : {"increment" : value}} r = requests.put(self.url + "/state", json.dumps(data)) return self.__error_check(r.status_code) def get_color_temp(self): """Returns the current colour temperature of the lights""" r = requests.get(self.url + "/state/ct") ans = json.loads(r.text) return ans['value'] ####################################################### #### COLOUR MODE #### ####################################################### def get_color_mode(self): """Returns the colour mode of the lights""" response = requests.get(self.url + "/state/colorMode") return json.loads(response.text) ####################################################### #### EFFECTS #### ####################################################### def get_current_effect(self): """Returns the currently selected effect If the name of the effect isn't available, this will return *Solid*, *Dynamic* or *Static* instead.
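# --- Hypothetical usage sketch (IP address and token are placeholders; needs a reachable device) ---
# Typical call flow for the Nanoleaf class above: construct it with a previously
# generated auth token, power the panels on, then adjust brightness and apply one
# of the preset colours defined at the top of the module.
#
#     nl = Nanoleaf("192.168.1.50", auth_token="STORED_TOKEN", print_errors=True)
#     nl.power_on()
#     nl.set_brightness(75)
#     nl.set_color(PURPLE)
#     print(nl.get_brightness(), nl.get_hue(), nl.get_saturation())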
for a valid user with authorization and valid input, ensure correct response: 404 and no metadata if the GUID or alias does not exist in indexd. """ fake_jwt = "1.2.3" guid_or_alias = "test_guid_alias" indexd_did = "dg.hello/test_guid" indexd_data = { "did": indexd_did, "rev": "123", "file_name": "im_a_blank_record.pfb", "acl": ["resource"], "authz": ["/path/to/resource"], } new_version_guid = valid_upload_file_patcher["data_upload_mocked_reponse"].get( "guid" ) new_version_data = { "did": new_version_guid, "rev": "987", "file_name": "im_another_blank_record.pfb", } # mock: creating a new version of "indexd_did" returns "new_version_data" indexd_blank_version_mocked_request = respx.post( f"{config.INDEXING_SERVICE_ENDPOINT}/index/blank/{indexd_did}", status_code=200, content=new_version_data, alias="indexd_post_blank", ) # mock the request to indexd: GUID or alias NOT found in indexd indexd_url = f"{config.INDEXING_SERVICE_ENDPOINT}/{guid_or_alias}" indexd_get_mocked_request = respx.get( indexd_url, status_code=404, content=indexd_data, alias="indexd_get" ) resp = client.post( f"/objects/{guid_or_alias}", json=data, headers={"Authorization": f"bearer {fake_jwt}"}, ) # check response contents assert resp.status_code == 404 assert resp.json().get("detail") assert not resp.json().get("guid") assert not resp.json().get("upload_url") assert not resp.json().get("aliases") assert not resp.json().get("metadata") assert indexd_get_mocked_request.called @respx.mock @pytest.mark.parametrize( "data", [ # all valid fields { "file_name": "test.txt", "aliases": ["abcdefg"], "metadata": {"foo": "bar"}, }, ], ) def test_create_for_guid_no_access_to_create_blank_version( client, valid_upload_file_patcher, data ): """ Test create /objects/<GUID or alias> for valid input, but a user without authorization to create a blank version in indexd. Should return 403. """ fake_jwt = "1.2.3" guid_or_alias = "test_guid_alias" indexd_did = "dg.hello/test_guid" indexd_data = { "did": indexd_did, "rev": "123", "file_name": "im_a_blank_record.pfb", "acl": ["resource"], "authz": ["/path/to/resource"], } # mock: creating a new version of "indexd_did" returns 403 unauthorized indexd_blank_version_mocked_request = respx.post( f"{config.INDEXING_SERVICE_ENDPOINT}/index/blank/{indexd_did}", status_code=403, alias="indexd_post_blank", ) # mock the request to indexd: GUID or alias found in indexd indexd_url = f"{config.INDEXING_SERVICE_ENDPOINT}/{guid_or_alias}" indexd_get_mocked_request = respx.get( indexd_url, status_code=200, content=indexd_data, alias="indexd_get" ) resp = client.post( f"/objects/{guid_or_alias}", json=data, headers={"Authorization": f"bearer {fake_jwt}"}, ) # check response contents assert resp.status_code == 403 assert resp.json().get("detail") assert not resp.json().get("guid") assert not resp.json().get("upload_url") assert not resp.json().get("aliases") assert not resp.json().get("metadata") assert indexd_get_mocked_request.called assert indexd_get_mocked_request.called assert indexd_blank_version_mocked_request.called @respx.mock @pytest.mark.parametrize( "data", [ # all valid fields { "file_name": "test.txt", "aliases": ["abcdefg"], "metadata": {"foo": "bar"}, }, ], ) def test_create_for_guid_no_access_to_upload( client, no_authz_upload_file_patcher, data ): """ Test create /objects/<GUID or alias> for valid input, but a user without authorization to get a presigned URL for upload. Should return 403. 
""" fake_jwt = "<PASSWORD>" guid_or_alias = "test_guid_alias" indexd_did = "dg.hello/test_guid" indexd_data = { "did": indexd_did, "rev": "123", "file_name": "im_a_blank_record.pfb", "acl": ["resource"], "authz": ["/path/to/resource"], } new_version_guid = no_authz_upload_file_patcher["data_upload_mocked_reponse"].get( "guid" ) new_version_data = { "did": new_version_guid, "rev": "987", "file_name": "im_another_blank_record.pfb", } # mock: creating a new version of "indexd_did" returns "new_version_data" indexd_blank_version_mocked_request = respx.post( f"{config.INDEXING_SERVICE_ENDPOINT}/index/blank/{indexd_did}", status_code=200, content=new_version_data, alias="indexd_post_blank", ) # mock the request to indexd: GUID or alias found in indexd indexd_url = f"{config.INDEXING_SERVICE_ENDPOINT}/{guid_or_alias}" indexd_get_mocked_request = respx.get( indexd_url, status_code=200, content=indexd_data, alias="indexd_get" ) resp = client.post( f"/objects/{guid_or_alias}", json=data, headers={"Authorization": f"bearer {fake_jwt}"}, ) # check response contents assert resp.status_code == 403 assert resp.json().get("detail") assert not resp.json().get("guid") assert not resp.json().get("upload_url") assert not resp.json().get("aliases") assert not resp.json().get("metadata") assert indexd_get_mocked_request.called assert indexd_get_mocked_request.called assert indexd_blank_version_mocked_request.called @respx.mock def test_get_object_in_indexd(client): """ Test the GET object endpoint when the provided key exists in indexd. If the key is an indexd alias, the metadata returned should be associated with the indexd GUID (did), not the alias itself. If the key exists in indexd, the record should be returned regardless of a 404 from MDS. """ guid_or_alias = "dg.hello/test_guid" indexd_did = "test_did" # mock the request to indexd: GUID or alias found in indexd indexd_url = f"{config.INDEXING_SERVICE_ENDPOINT}/{guid_or_alias}" indexd_data = {"did": indexd_did, "size": 42} indexd_get_mocked_request = respx.get( indexd_url, status_code=200, content=indexd_data ) # GET an object that exists in indexd but NOT in MDS get_object_url = f"/objects/{guid_or_alias}" resp = client.get(get_object_url) assert indexd_get_mocked_request.called assert resp.status_code == 200, resp.text assert resp.json() == {"record": indexd_data, "metadata": {}} # create metadata for this object mds_data = dict(a=1, b=2) client.post("/metadata/" + indexd_did, json=mds_data).raise_for_status() # GET an object that exists in indexd AND in MDS try: resp = client.get(get_object_url) assert indexd_get_mocked_request.called assert resp.status_code == 200, resp.text assert resp.json() == {"record": indexd_data, "metadata": mds_data} finally: client.delete("/metadata/" + indexd_did) @respx.mock def test_get_object_not_in_indexd(client): """ Test the GET object endpoint when the provided key does NOT exist in indexd, or when indexd errors. If the key exists in MDS, the metadata should be returned regardless of a non-200 response from indexd. 
""" guid_or_alias = "dg.hello/test_guid" # mock the request to indexd: GUID or alias NOT found in indexd indexd_url = f"{config.INDEXING_SERVICE_ENDPOINT}/{guid_or_alias}" indexd_get_mocked_request = respx.get(indexd_url, status_code=404) # GET an object that exists in NEITHER indexd NOR MDS get_object_url = f"/objects/{guid_or_alias}" resp = client.get(get_object_url) assert indexd_get_mocked_request.called assert resp.status_code == 404, resp.text # create metadata for this object mds_data = dict(a=1, b=2) client.post("/metadata/" + guid_or_alias, json=mds_data).raise_for_status() try: # GET an object that exists in MDS but NOT in indexd resp = client.get(get_object_url) assert indexd_get_mocked_request.called assert resp.status_code == 200, resp.text assert resp.json() == {"record": {}, "metadata": mds_data} # mock the request to indexd: 500 error from indexd respx.clear() indexd_get_mocked_request = respx.get(indexd_url, status_code=500) # GET an object that exists in MDS, even if indexd failed resp = client.get(get_object_url) assert indexd_get_mocked_request.called assert resp.status_code == 200, resp.text assert resp.json() == {"record": {}, "metadata": mds_data} finally: client.delete("/metadata/" + guid_or_alias) @respx.mock def test_get_object_signed_download_url_for_data_access_200( client, download_endpoints, signed_url_mock ): """ Test that mds returns a 200 containing the signed download url when the data access service download endpoint returns a 200. """ data_access_signed_download_get_request_mock = respx.get( download_endpoints["data_access"], status_code=200, content={"url": signed_url_mock}, ) resp = client.get(download_endpoints["mds"]) assert data_access_signed_download_get_request_mock.called assert resp.status_code == 200, resp.text resp_json = resp.json() assert "url" in resp_json assert resp_json["url"] == signed_url_mock @respx.mock def test_get_object_signed_download_url_for_data_access_404( client, download_endpoints, signed_url_mock ): """ Test that mds returns a 404 when the data access service download endpoint returns a 404. """ data_access_signed_download_get_request_mock = respx.get( download_endpoints["data_access"], status_code=404 ) resp = client.get(download_endpoints["mds"]) assert data_access_signed_download_get_request_mock.called assert resp.status_code == 404, resp.text @respx.mock def test_get_object_signed_download_url_for_data_access_401( client, download_endpoints, signed_url_mock ): """ Test that mds returns a 403 when the data access service download endpoint returns a 401. """ data_access_signed_download_get_request_mock = respx.get( download_endpoints["data_access"], status_code=401 ) resp = client.get(download_endpoints["mds"]) assert data_access_signed_download_get_request_mock.called assert resp.status_code == 403, resp.text @respx.mock def test_get_object_signed_download_url_for_data_access_403( client, download_endpoints, signed_url_mock ): """ Test that mds returns a 403 when the data access service download endpoint returns a 403. """ data_access_signed_download_get_request_mock = respx.get( download_endpoints["data_access"], status_code=403 ) resp = client.get(download_endpoints["mds"]) assert data_access_signed_download_get_request_mock.called assert resp.status_code == 403, resp.text @respx.mock def test_get_object_signed_download_url_for_data_access_500( client, download_endpoints, signed_url_mock ): """ Test that mds returns a 500 when the data access service download endpoint returns a 500. 
""" data_access_signed_download_get_request_mock = respx.get( download_endpoints["data_access"], status_code=500 ) resp = client.get(download_endpoints["mds"]) assert data_access_signed_download_get_request_mock.called assert resp.status_code == 500, resp.text @respx.mock def test_get_object_latest_when_indexd_returns_different_guid_and_different_guid_in_mds( client, latest_setup ): """ Test that mds returns a 200 containing an indexd record and mds object associated with the guid returned from indexd's latest endpoint (in this case, that latest guid returned from indexd is different to the oldest guid initially provided to the mds latest endpoint). """ get_indexd_latest_request_mock = respx.get( latest_setup["indexd_latest_endpoint_with_oldest_guid"], status_code=200, content=latest_setup["indexd_latest_record_data"], ) resp = client.get(latest_setup["mds_latest_endpoint_with_oldest_guid"]) assert get_indexd_latest_request_mock.called assert resp.status_code == 200, resp.text assert resp.json() == { "record": latest_setup["indexd_latest_record_data"], "metadata": latest_setup["mds_objects"]["latest"]["data"], } @respx.mock def test_get_object_latest_when_indexd_returns_same_guid_and_same_guid_in_mds( client, latest_setup ): """ Test that mds returns a 200 containing an indexd record and mds object associated with the guid initially provided to the mds latest endpoint (in this case, indexd's latest endpoint returns a guid that is the same as the one intially provided to the mds latest endpoint). """ get_indexd_latest_request_mock = respx.get( latest_setup["indexd_latest_endpoint_with_oldest_guid"], status_code=200, content=latest_setup["indexd_oldest_record_data"], ) resp = client.get(latest_setup["mds_latest_endpoint_with_oldest_guid"]) assert get_indexd_latest_request_mock.called assert resp.status_code == 200, resp.text assert resp.json() == { "record": latest_setup["indexd_oldest_record_data"], "metadata": latest_setup["mds_objects"]["oldest"]["data"], } @respx.mock def test_get_object_latest_when_indexd_returns_guid_not_in_mds(client, latest_setup): """ Test that mds returns a 200 containing an indexd record associated with the guid returned from the indexd latest endpoint and an empty metadata object (in this case, the indexd latest endpoint returns a guid that is not a key in the mds database). """ get_indexd_latest_request_mock = respx.get( latest_setup["indexd_latest_endpoint_with_oldest_guid"], status_code=200, content=latest_setup["indexd_non_mds_record_data"], ) resp = client.get(latest_setup["mds_latest_endpoint_with_oldest_guid"]) assert get_indexd_latest_request_mock.called assert resp.status_code == 200, resp.text assert resp.json() == { "record": latest_setup["indexd_non_mds_record_data"], "metadata": {}, } @respx.mock def test_get_object_latest_when_indexd_returns_404_but_guid_in_mds( client, latest_setup ): """ Test that mds returns a 200 containing an empty indexd record and a metadata object associated with the guid/key intially provided to the mds latest endpoint (in this case, indexd's latest endpoint returns a 404). """ get_indexd_latest_request_mock = respx.get( latest_setup["indexd_latest_endpoint_with_oldest_guid"], status_code=404, ) resp = client.get(latest_setup["mds_latest_endpoint_with_oldest_guid"]) assert get_indexd_latest_request_mock.called assert resp.status_code
# -*- coding: utf-8 -*- """ TODO: separate out the tests and make this file just generate the demo data """ import logging import itertools as it import numpy as np import utool as ut from wbia.algo.graph.state import POSTV, NEGTV, INCMP, UNREV from wbia.algo.graph.state import SAME, DIFF, NULL # NOQA print, rrr, profile = ut.inject2(__name__) logger = logging.getLogger('wbia') def make_dummy_infr(annots_per_name): import wbia nids = [val for val, num in enumerate(annots_per_name, start=1) for _ in range(num)] aids = range(len(nids)) infr = wbia.AnnotInference(None, aids, nids=nids, autoinit=True, verbose=1) return infr def demodata_mtest_infr(state='empty'): import wbia ibs = wbia.opendb(db='PZ_MTEST') annots = ibs.annots() names = list(annots.group_items(annots.nids).values()) ut.shuffle(names, rng=321) test_aids = ut.flatten(names[1::2]) infr = wbia.AnnotInference(ibs, test_aids, autoinit=True) infr.reset(state=state) return infr def demodata_infr2(defaultdb='PZ_MTEST'): defaultdb = 'PZ_MTEST' import wbia ibs = wbia.opendb(defaultdb=defaultdb) annots = ibs.annots() names = list(annots.group_items(annots.nids).values())[0:20] def dummy_phi(c, n): x = np.arange(n) phi = c * x / (c * x + 1) phi = phi / phi.sum() phi = np.diff(phi) return phi phis = {c: dummy_phi(c, 30) for c in range(1, 4)} aids = ut.flatten(names) infr = wbia.AnnotInference(ibs, aids, autoinit=True) infr.init_termination_criteria(phis) infr.init_refresh_criteria() # Partially review n1, n2, n3, n4 = names[0:4] for name in names[4:]: for a, b in ut.itertwo(name.aids): infr.add_feedback((a, b), POSTV) for name1, name2 in it.combinations(names[4:], 2): infr.add_feedback((name1.aids[0], name2.aids[0]), NEGTV) return infr def demo2(): """ CommandLine: python -m wbia.algo.graph.demo demo2 --viz python -m wbia.algo.graph.demo demo2 Example: >>> # DISABLE_DOCTEST >>> from wbia.algo.graph.demo import * # NOQA >>> result = demo2() >>> print(result) """ import wbia.plottool as pt from wbia.scripts.thesis import TMP_RC import matplotlib as mpl mpl.rcParams.update(TMP_RC) # ---- Synthetic data params params = { 'redun.pos': 2, 'redun.neg': 2, } # oracle_accuracy = .98 # oracle_accuracy = .90 # oracle_accuracy = (.8, 1.0) oracle_accuracy = (0.85, 1.0) # oracle_accuracy = 1.0 # --- draw params VISUALIZE = ut.get_argflag('--viz') # QUIT_OR_EMEBED = 'embed' QUIT_OR_EMEBED = 'quit' TARGET_REVIEW = ut.get_argval('--target', type_=int, default=None) START = ut.get_argval('--start', type_=int, default=None) END = ut.get_argval('--end', type_=int, default=None) # ------------------ # rng = np.random.RandomState(42) # infr = demodata_infr(num_pccs=4, size=3, size_std=1, p_incon=0) # infr = demodata_infr(num_pccs=6, size=7, size_std=1, p_incon=0) # infr = demodata_infr(num_pccs=3, size=5, size_std=.2, p_incon=0) infr = demodata_infr(pcc_sizes=[5, 2, 4]) infr.verbose = 100 # apply_dummy_viewpoints(infr) # infr.ensure_cliques() infr.ensure_cliques() infr.ensure_full() # infr.apply_edge_truth() # Dummy scoring infr.init_simulation(oracle_accuracy=oracle_accuracy, name='demo2') # infr_gt = infr.copy() dpath = ut.ensuredir(ut.truepath('~/Desktop/demo')) ut.remove_files_in_dir(dpath) fig_counter = it.count(0) def show_graph(infr, title, final=False, selected_edges=None): if not VISUALIZE: return # TODO: rich colored text? 
latest = '\n'.join(infr.latest_logs()) showkw = dict( # fontsize=infr.graph.graph['fontsize'], # fontname=infr.graph.graph['fontname'], show_unreviewed_edges=True, show_inferred_same=False, show_inferred_diff=False, outof=(len(infr.aids)), # show_inferred_same=True, # show_inferred_diff=True, selected_edges=selected_edges, show_labels=True, simple_labels=True, # show_recent_review=not final, show_recent_review=False, # splines=infr.graph.graph['splines'], reposition=False, # with_colorbar=True ) verbose = infr.verbose infr.verbose = 0 infr_ = infr.copy() infr_ = infr infr_.verbose = verbose infr_.show(pickable=True, verbose=0, **showkw) infr.verbose = verbose # logger.info('status ' + ut.repr4(infr_.status())) # infr.show(**showkw) ax = pt.gca() pt.set_title(title, fontsize=20) fig = pt.gcf() fontsize = 22 if True: # postprocess xlabel lines = [] for line in latest.split('\n'): if False and line.startswith('ORACLE ERROR'): lines += ['ORACLE ERROR'] else: lines += [line] latest = '\n'.join(lines) if len(lines) > 10: fontsize = 16 if len(lines) > 12: fontsize = 14 if len(lines) > 14: fontsize = 12 if len(lines) > 18: fontsize = 10 if len(lines) > 23: fontsize = 8 if True: pt.adjust_subplots(top=0.95, left=0, right=1, bottom=0.45, fig=fig) ax.set_xlabel('\n' + latest) xlabel = ax.get_xaxis().get_label() xlabel.set_horizontalalignment('left') # xlabel.set_x(.025) xlabel.set_x(-0.6) # xlabel.set_fontname('CMU Typewriter Text') xlabel.set_fontname('Inconsolata') xlabel.set_fontsize(fontsize) ax.set_aspect('equal') # ax.xaxis.label.set_color('red') from os.path import join fpath = join(dpath, 'demo_{:04d}.png'.format(next(fig_counter))) fig.savefig( fpath, dpi=300, # transparent=True, edgecolor='none', ) # pt.save_figure(dpath=dpath, dpi=300) infr.latest_logs() if VISUALIZE: infr.update_visual_attrs(groupby='name_label') infr.set_node_attrs('pin', 'true') node_dict = ut.nx_node_dict(infr.graph) logger.info(ut.repr4(node_dict[1])) if VISUALIZE: infr.latest_logs() # Pin Nodes into the target groundtruth position show_graph(infr, 'target-gt') logger.info(ut.repr4(infr.status())) infr.clear_feedback() infr.clear_name_labels() infr.clear_edges() logger.info(ut.repr4(infr.status())) infr.latest_logs() if VISUALIZE: infr.update_visual_attrs() infr.prioritize('prob_match') if VISUALIZE or TARGET_REVIEW is None or TARGET_REVIEW == 0: show_graph(infr, 'initial state') def on_new_candidate_edges(infr, edges): # hack updateing visual attrs as a callback infr.update_visual_attrs() infr.on_new_candidate_edges = on_new_candidate_edges infr.params.update(**params) infr.refresh_candidate_edges() VIZ_ALL = VISUALIZE and TARGET_REVIEW is None and START is None logger.info('VIZ_ALL = %r' % (VIZ_ALL,)) if VIZ_ALL or TARGET_REVIEW == 0: show_graph(infr, 'find-candidates') # _iter2 = enumerate(infr.generate_reviews(**params)) # _iter2 = list(_iter2) # assert len(_iter2) > 0 # prog = ut.ProgIter(_iter2, label='demo2', bs=False, adjust=False, # enabled=False) count = 1 first = 1 for edge, priority in infr._generate_reviews(data=True): msg = 'review #%d, priority=%.3f' % (count, priority) logger.info('\n----------') infr.print('pop edge {} with priority={:.3f}'.format(edge, priority)) # logger.info('remaining_reviews = %r' % (infr.remaining_reviews()),) # Make the next review if START is not None: VIZ_ALL = count >= START if END is not None and count >= END: break infr.print(msg) if ut.allsame(infr.pos_graph.node_labels(*edge)) and first: # Have oracle make a mistake early feedback = infr.request_oracle_review(edge, 
accuracy=0) first -= 1 else: feedback = infr.request_oracle_review(edge) AT_TARGET = TARGET_REVIEW is not None and count >= TARGET_REVIEW - 1 SHOW_CANDIATE_POP = True if SHOW_CANDIATE_POP and (VIZ_ALL or AT_TARGET): # import utool # utool.embed() infr.print( ut.repr2(infr.task_probs['match_state'][edge], precision=4, si=True) ) infr.print('len(queue) = %r' % (len(infr.queue))) # Show edge selection infr.print('Oracle will predict: ' + feedback['evidence_decision']) show_graph(infr, 'pre' + msg, selected_edges=[edge]) if count == TARGET_REVIEW: infr.EMBEDME = QUIT_OR_EMEBED == 'embed' infr.add_feedback(edge, **feedback) infr.print('len(queue) = %r' % (len(infr.queue))) # infr.apply_nondynamic_update() # Show the result if VIZ_ALL or AT_TARGET: show_graph(infr, msg) # import sys # sys.exit(1) if count == TARGET_REVIEW: break count += 1 infr.print('status = ' + ut.repr4(infr.status(extended=False))) show_graph(infr, 'post-review (#reviews={})'.format(count), final=True) # ROUND 2 FIGHT # if TARGET_REVIEW is None and round2_params is not None: # # HACK TO GET NEW THINGS IN QUEUE # infr.params = round2_params # _iter2 = enumerate(infr.generate_reviews(**params)) # prog = ut.ProgIter(_iter2, label='round2', bs=False, adjust=False, # enabled=False) # for count, (aid1, aid2) in prog: # msg = 'reviewII #%d' % (count) # logger.info('\n----------') # logger.info(msg) # logger.info('remaining_reviews = %r' % (infr.remaining_reviews()),) # # Make the next review evidence_decision # feedback = infr.request_oracle_review(edge) # if count == TARGET_REVIEW: # infr.EMBEDME = QUIT_OR_EMEBED == 'embed' # infr.add_feedback(edge, **feedback) # # Show the result # if PRESHOW or TARGET_REVIEW is None or count >= TARGET_REVIEW - 1: # show_graph(infr, msg) # if count == TARGET_REVIEW: # break # show_graph(infr, 'post-re-review', final=True) if not getattr(infr, 'EMBEDME', False): if ut.get_computer_name().lower() in ['hyrule', 'ooo']: pt.all_figures_tile(monitor_num=0, percent_w=0.5) else: pt.all_figures_tile() ut.show_if_requested() valid_views = ['L', 'F', 'R', 'B'] adjacent_views = { v: [valid_views[(count + i) % len(valid_views)] for i in [-1, 0, 1]] for count, v in enumerate(valid_views) } def get_edge_truth(infr, n1, n2): node_dict = ut.nx_node_dict(infr.graph) nid1 = node_dict[n1]['orig_name_label'] nid2 = node_dict[n2]['orig_name_label'] try: view1 = node_dict[n1]['viewpoint'] view2 = node_dict[n2]['viewpoint'] comparable = view1 in adjacent_views[view2] except KeyError: comparable = True # raise same = nid1 == nid2 if not comparable: return 2 else: return int(same) def apply_dummy_viewpoints(infr): transition_rate = 0.5 transition_rate = 0 valid_views = ['L', 'F', 'R', 'B'] rng = np.random.RandomState(42) class MarkovView(object): def __init__(self): self.dir_ = +1 self.state = 0 def __call__(self): return self.next_state() def next_state(self): if self.dir_ == -1 and self.state <= 0: self.dir_ = +1 if self.dir_ == +1 and self.state >= len(valid_views) - 1: self.dir_ = -1 if rng.rand() < transition_rate: self.state += self.dir_ return valid_views[self.state] mkv = MarkovView() nid_to_aids = ut.group_pairs( [(n, d['name_label']) for n, d in infr.graph.nodes(data=True)] ) grouped_nodes = list(nid_to_aids.values()) node_to_view = {node: mkv() for nodes in grouped_nodes for node in nodes} infr.set_node_attrs('viewpoint', node_to_view) def make_demo_infr(ccs, edges=[], nodes=[], infer=True): """ Depricate in favor of demodata_infr """ import wbia import networkx as nx if nx.__version__.startswith('1'): 
nx.add_path = nx.Graph.add_path G = wbia.AnnotInference._graph_cls() G.add_nodes_from(nodes) for cc in ccs: if len(cc) == 1: G.add_nodes_from(cc) nx.add_path(G, cc, evidence_decision=POSTV, meta_decision=NULL) # for edge in edges: # u, v, d = edge if len(edge) == 3 else tuple(edge) + ({},) G.add_edges_from(edges) infr = wbia.AnnotInference.from_netx(G, infer=infer) infr.verbose = 3 infr.relabel_using_reviews(rectify=False) infr.graph.graph['dark_background'] = False infr.graph.graph['ignore_labels'] = True infr.set_node_attrs('width', 40) infr.set_node_attrs('height', 40) # infr.set_node_attrs('fontsize', fontsize) # infr.set_node_attrs('fontname', fontname) infr.set_node_attrs('fixed_size', True) return infr @profile def demodata_infr(**kwargs): """ kwargs = {} CommandLine: python -m wbia.algo.graph.demo demodata_infr --show python -m wbia.algo.graph.demo demodata_infr --num_pccs=25 python -m wbia.algo.graph.demo demodata_infr --profile --num_pccs=100 Ignore: >>> from wbia.algo.graph.demo import * # NOQA >>> from wbia.algo.graph import demo >>> import networkx as nx >>> kwargs = dict(num_pccs=6, p_incon=.5, size_std=2) >>> kwargs = ut.argparse_dict(kwargs) >>> infr =
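# --- Hypothetical example (not in the original module): driving make_demo_infr() by hand ---
# Two positively-linked connected components plus one negative edge between them,
# matching the data shapes make_demo_infr() consumes; requires a working wbia install.
def _sketch_make_demo_infr():
    ccs = [(1, 2, 3), (4, 5)]                       # each tuple becomes a POSTV-linked path
    edges = [(3, 4, {'evidence_decision': NEGTV})]  # one negative review between the groups
    return make_demo_infr(ccs, edges=edges)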
run's metadata author_metadata (:obj:`~de_sim.simulation_metadata.AuthorMetadata`): information about the person who runs the simulation, if provided by the simulation application measurements_fh (:obj:`_io.TextIOWrapper`): file handle for debugging measurements file mem_tracker (:obj:`pympler.tracker.SummaryTracker`): a memory use tracker for debugging """ # Termination messages NO_EVENTS_REMAIN = " No events remain" END_TIME_EXCEEDED = " End time exceeded" TERMINATE_WITH_STOP_CONDITION_SATISFIED = " Terminate with stop condition satisfied" # number of rows to print in a performance profile NUM_PROFILE_ROWS = 50 def __init__(self): self.debug_logs = core.get_debug_logs() self.fast_debug_file_logger = FastLogger(self.debug_logs.get_log('de_sim.debug.file'), 'debug') self.fast_plotting_logger = FastLogger(self.debug_logs.get_log('de_sim.plot.file'), 'debug') # self.time is not known until a simulation starts self.time = None self.simulation_objects = {} self.event_queue = EventQueue() self.event_counts = Counter() self.__initialized = False def add_object(self, simulation_object): """ Add a simulation object instance to this simulation Args: simulation_object (:obj:`~de_sim.simulation_object.SimulationObject`): a simulation object instance that will be used by this simulation Raises: :obj:`SimulatorError`: if the simulation object's name is already in use """ name = simulation_object.name if name in self.simulation_objects: raise SimulatorError("cannot add simulation object '{}', name already in use".format(name)) simulation_object.set_simulator(self) self.simulation_objects[name] = simulation_object def add_objects(self, simulation_objects): """ Add multiple simulation objects to this simulation Args: simulation_objects (:obj:`iterator` of :obj:`~de_sim.simulation_object.SimulationObject`): an iterator over simulation objects """ for simulation_object in simulation_objects: self.add_object(simulation_object) def get_object(self, simulation_object_name): """ Get a simulation object used by this simulation Args: simulation_object_name (:obj:`str`): the name of a simulation object Returns: :obj:`~de_sim.simulation_object.SimulationObject`: the simulation object whose name is `simulation_object_name` Raises: :obj:`SimulatorError`: if the simulation object whose name is `simulation_object_name` is not used by this simulation """ if simulation_object_name not in self.simulation_objects: raise SimulatorError("cannot get simulation object '{}'".format(simulation_object_name)) return self.simulation_objects[simulation_object_name] def get_objects(self): """ Get all simulation object instances in this simulation Returns: :obj:`iterator` over :obj:`~de_sim.simulation_object.SimulationObject`: an iterator over all simulation object instances in this simulation """ # This is reproducible for Python 3.7 and later (see https://docs.python.org/3/whatsnew/3.7.html) # TODO(Arthur): eliminate external calls to self.simulator.simulation_objects return self.simulation_objects.values() def _delete_object(self, simulation_object): """ Delete a simulation object instance from this simulation This method should not be called by :obj:`~de_sim.simulation_object.SimulationObject`\ s. 
Args: simulation_object (:obj:`~de_sim.simulation_object.SimulationObject`): a simulation object instance that is part of this simulation Raises: :obj:`SimulatorError`: if the simulation object is not part of this simulation """ # prohibit calls to _delete_object while a simulation is running # more precisely, prohibit between a simulation's initialization & reset if self.__initialized: raise SimulatorError(f"cannot delete simulation object: simulator is between " f"initialize and reset") name = simulation_object.name if name not in self.simulation_objects: raise SimulatorError(f"cannot delete simulation object '{name}', it has not been added") simulation_object.del_simulator() del self.simulation_objects[name] def initialize(self): """ Initialize a simulation Call `init_before_run()` in each simulation object that has been loaded. Raises: :obj:`SimulatorError`: if the simulation has already been initialized """ if self.__initialized: raise SimulatorError('Simulation has already been initialized') for sim_obj in self.simulation_objects.values(): sim_obj.init_before_run() self.event_counts.clear() self.__initialized = True def init_metadata_collection(self, sim_config): """ Initialize this simulation's metadata object Call just before a simulation runs, so that the correct start time of the simulation is recorded Args: sim_config (:obj:`~de_sim.simulation_config.SimulationConfig`): metadata about the simulation's configuration (start time, maximum time, etc.) """ if self.author_metadata is None: author = AuthorMetadata() else: author = self.author_metadata run = RunMetadata() run.record_ip_address() run.record_start() # obtain repo metadaa, if possible simulator_repo = None try: simulator_repo, _ = get_repo_metadata(repo_type=RepoMetadataCollectionType.SCHEMA_REPO) except ValueError: pass self.sim_metadata = SimulationMetadata(simulation_config=sim_config, run=run, author=author, simulator_repo=simulator_repo) def finish_metadata_collection(self): """ Finish metadata collection: record a simulation's runtime, and write all metadata to disk """ self.sim_metadata.run.record_run_time() if self.sim_config.output_dir: SimulationMetadata.write_dataclass(self.sim_metadata, self.sim_config.output_dir) def reset(self): """ Reset this :obj:`Simulator` Delete all objects, and empty the event queue. """ self.__initialized = False for simulation_object in list(self.simulation_objects.values()): self._delete_object(simulation_object) self.event_queue.reset() self.time = None def message_queues(self): """ Return a string listing all message queues in the simulation, organized by simulation object Returns: :obj:`str`: a list of all message queues in the simulation and their messages """ now = "'uninitialized'" if self.time is not None: now = f"{self.time:6.3f}" data = [f'Event queues at {now}'] for sim_obj in sorted(self.simulation_objects.values(), key=lambda sim_obj: sim_obj.name): data.append(sim_obj.name + ':') rendered_eq = self.event_queue.render(sim_obj=sim_obj) if rendered_eq is None: data.append('Empty event queue') else: data.append(rendered_eq) data.append('') return '\n'.join(data) @staticmethod def get_sim_config(max_time=None, sim_config=None, config_dict=None): """ External simulate interface Legal combinations of the three parameters: 1. Just `max_time` 2. Just `sim_config`, which will contain an entry for `max_time` 3. Just `config_dict`, which must contain an entry for `max_time` Other combinations are illegal. 
Args: max_time (:obj:`float`, optional): the time of the end of the simulation sim_config (:obj:`~de_sim.simulation_config.SimulationConfig`, optional): the simulation run's configuration config_dict (:obj:`dict`, optional): a dictionary with keys chosen from the field names in :obj:`~de_sim.simulation_config.SimulationConfig`; note that `config_dict` is not a `kwargs` argument Returns: :obj:`~de_sim.simulation_config.SimulationConfig`: a validated simulation configuration Raises: :obj:`SimulatorError`: if no arguments are provided, or multiple arguments are provided, or `max_time` is missing from `config_dict` """ num_args = 0 if max_time is not None: num_args += 1 if sim_config is not None: num_args += 1 if config_dict: num_args += 1 if num_args == 0: raise SimulatorError('max_time, sim_config, or config_dict must be provided') if 1 < num_args: raise SimulatorError('at most 1 of max_time, sim_config, or config_dict may be provided') # catch common error generated when sim_config= is not used by Simulator.simulate(sim_config) if isinstance(max_time, SimulationConfig): raise SimulatorError(f"sim_config is not provided, sim_config= is probably needed") # initialize sim_config if it is not provided if sim_config is None: if max_time is not None: sim_config = SimulationConfig(max_time) else: # config_dict must be initialized if 'max_time' not in config_dict: raise SimulatorError('max_time must be provided in config_dict') sim_config = SimulationConfig(**config_dict) sim_config.validate() return sim_config SimulationReturnValue = namedtuple('SimulationReturnValue', 'num_events profile_stats', defaults=(None, None)) SimulationReturnValue.__doc__ += ': the value(s) returned by a simulation run' SimulationReturnValue.num_events.__doc__ += (": the number of times a simulation object handles an event, " "which may be smaller than the number of events sent, because simultaneous " "events at a simulation object are handled together") SimulationReturnValue.profile_stats.__doc__ += (": if performance is being profiled, a :obj:`pstats.Stats` instance " "containing the profiling statistics") def simulate(self, max_time=None, sim_config=None, config_dict=None, author_metadata=None): """ Run a simulation Exactly one of the arguments `max_time`, `sim_config`, and `config_dict` must be provided. See `get_sim_config` for additional constraints on these arguments. 
Args: max_time (:obj:`float`, optional): the maximum time of the end of the simulation sim_config (:obj:`~de_sim.simulation_config.SimulationConfig`, optional): a simulation run's configuration config_dict (:obj:`dict`, optional): a dictionary with keys chosen from the field names in :obj:`~de_sim.simulation_config.SimulationConfig` author_metadata (:obj:`~de_sim.simulation_metadata.AuthorMetadata`, optional): information about the person who runs the simulation; if not provided, then the their username will be obtained automatically Returns: :obj:`SimulationReturnValue`: a :obj:`SimulationReturnValue` whose fields are documented with its definition Raises: :obj:`SimulatorError`: if the simulation has not been initialized, or has no objects, or has no initial events, or attempts to execute an event that violates non-decreasing time order """ self.sim_config = self.get_sim_config(max_time=max_time, sim_config=sim_config, config_dict=config_dict) self.author_metadata = author_metadata if self.sim_config.output_dir: measurements_file = core.get_config()['de_sim']['measurements_file'] self.measurements_fh = open(os.path.join(self.sim_config.output_dir, measurements_file), 'w') print(f"de_sim measurements: {datetime.now().isoformat(' ')}", file=self.measurements_fh) profile = None if self.sim_config.profile: # profile the simulation and return the profile object with tempfile.NamedTemporaryFile() as file_like_obj: out_file = file_like_obj.name locals = {'self': self} cProfile.runctx('self._simulate()', {}, locals, filename=out_file) if self.sim_config.output_dir: profile = pstats.Stats(out_file, stream=self.measurements_fh) else: profile = pstats.Stats(out_file) profile.sort_stats('tottime').print_stats(self.NUM_PROFILE_ROWS) else: self._simulate() if self.sim_config.output_dir: self.measurements_fh.close() return self.SimulationReturnValue(self.num_handlers_called, profile) def run(self, max_time=None, sim_config=None, config_dict=None, author_metadata=None): """ Alias for `simulate` """ return self.simulate(max_time=max_time, sim_config=sim_config, config_dict=config_dict, author_metadata=author_metadata) def _simulate(self): """ Run the simulation Returns: :obj:`int`: the number of times a simulation object executes `_handle_event()`. This may be smaller than the number of events sent, because simultaneous events at one simulation object are handled together. Raises: :obj:`SimulatorError`: if the simulation has not been initialized, or has no objects, or has no initial events, or attempts to start before the start time in `time_init`, or attempts to execute an event that violates non-decreasing time order """ if not self.__initialized: raise SimulatorError("Simulation has not been initialized") if not len(self.get_objects()): raise SimulatorError("Simulation has no objects") if self.event_queue.empty(): raise SimulatorError("Simulation has no initial events") _object_mem_tracking = False if 0 < self.sim_config.object_memory_change_interval: _object_mem_tracking = True # don't import tracker unless it's being used from pympler
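# --- Hypothetical usage sketch (MySimObject stands in for an application-defined
# de_sim SimulationObject subclass and is not part of this module) ---
# get_sim_config() above accepts exactly one of max_time, sim_config, or config_dict:
#
#     cfg = Simulator.get_sim_config(max_time=100)
#     cfg = Simulator.get_sim_config(sim_config=SimulationConfig(100))
#     cfg = Simulator.get_sim_config(config_dict={'max_time': 100})
#
# and a typical run built only from methods defined above:
#
#     simulator = Simulator()
#     simulator.add_object(MySimObject('obj_1'))
#     simulator.initialize()
#     result = simulator.simulate(max_time=100)
#     print(result.num_events)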
#!/usr/bin/env python3 """ CLI implementation to communicate with a connected Arduino based EEPROM Programmer """ import argparse import time import sys from difflib import context_diff as diff_func from io import BytesIO from typing import Sequence, Union, BinaryIO from serial import Serial from tqdm import tqdm def format_hex(in_bytes: bytes) -> Sequence[str]: """ format an input sequence of bytes into a ordered form with lines of 8 hex-formatted bytes prefixed by the address of the first byte in the line. :param in_bytes: the bytes to format :return: a list of formatted lines Example: >>> print('\n'.join(eepro.format_hex(b'0123456789ABCDEF'))) 0x0000: 30 31 32 33 34 35 36 37 0x0008: 38 39 41 42 43 44 45 46 """ chunk_size = 8 byte = "{:02X} " address = "0x{:04X}: " formatted_str = "" for i, in_byte in enumerate(in_bytes): if i % (chunk_size) == 0: formatted_str += "\n" + address.format(i) elif i % (chunk_size // 2) == 0: formatted_str += " " formatted_str += byte.format(in_byte) return formatted_str.split("\n") class FillBytes(BytesIO): """ File like object that is filled with ``length`` occurrences of ``fill_byte`` bytes. Sole purpose of the class is to overwrite the ``__repr__`` of ``io.ByteIO`` :param fill_byte: the byte to fill the file with :param length: number of bytes in the file """ def __init__(self, fill_byte: bytes, length: int): self.fill_byte = fill_byte super().__init__(fill_byte * length) def __repr__(self) -> str: return f"fill bytes (0x{self.fill_byte[0]:02X})" class EEProgrammer(Serial): """ Represents the connection to an Arduino based EEPROM programmer via a (virtual) serial port """ esc_char = b"\x1B" end_char = b"\x04" ack_char = b"\x06" def reset(self) -> None: """ Resets the connected programmer and waits for it to start back up. Useful to start in a known state """ self.dtr = True time.sleep(0.11) self.dtr = False time.sleep(2) def acknowledged_write(self, byte: bytes) -> None: """ Write a single byte to the programmer and wait for an acknowledge response. :param byte: the byte to write :raises ConnectionError: If the programmer responds with anything but an acknowledge or the operation times out """ self.write(byte) self.flush() if self.read() != EEProgrammer.ack_char: raise ConnectionError( "Programmer did not acknowledge write", self.flush_read_buffer().decode("ascii"), ) def write_file(self, file: Union[str, BinaryIO], start_address: int = 0x00) -> None: """ Write the contents of a file to the EEPROM. 
Any control bytes in the input file are automatically escaped before sending :param file: path to a file or an already opened file-like object in binary mode :param start_address: offset in the EEPROM where to start writing the file :raises AssertionError: if the programmer responds to have received less bytes than could be read from the file :raises ConnectionError: if the programmer fails to acknowledge any byte send to it """ self.acknowledged_write("w ".encode("ascii")) self.acknowledged_write(f"{start_address}".encode("ascii")) if isinstance(file, str): with open(file, "rb") as bin_file: raw_content = bin_file.read() else: raw_content = file.read() escaped_content = EEProgrammer.escape_file_contents(raw_content) for byte in tqdm(escaped_content, desc=f"writing {file}"): self.acknowledged_write(bytes([byte])) self.acknowledged_write(EEProgrammer.end_char) response = self.readline() bytes_written = int(response) if bytes_written != len(raw_content): raise AssertionError( f"written {bytes_written} bytes, expected {len(raw_content)}" ) def read_file(self, file: str, length: int, start_address: int = 0x00) -> None: """ read the contents of the EEPROM to a file :param file: path to a file that will be opened in binary mode (does not need to exist) :param length: number of bytes to read :param start_address: offset in the EEPROM where to start reading :raises AssertionError: if the programmer responds to have sent more bytes than could be read from the connection or the programmer sends an invalid escape sequence :raises ConnectionError: if the programmer fails to acknowledge any byte send to it """ contents = self.read_contents(start_address, length) with open(file, "wb") as outfile: outfile.write(contents) def fill(self, fill_byte: bytes, length: int, start_address: int = 0x00) -> None: """ fill an area of the EEPROM memory with a single value :param fill_byte: value to fill the EEPROM memory section :param length: length of the memory block in bytes :param start_address: offset of the memory block from the start of the EEPROM :raises AssertionError: if the programmer responds to have received less bytes than specified :raises ConnectionError: if the programmer fails to acknowledge any byte send to it """ dummy_file = FillBytes(fill_byte, length) self.write_file(dummy_file, start_address=start_address) def check_filled( self, fill_byte: bytes, length: int, start_address: int = 0x00 ) -> None: """ Check if a section of EEPROM is filled with a specific byte :param fill_byte: value that should be read by every cell in the specified memory area :param length: length of the memory block in bytes :param start_address: offset of the memory block from the start of the EEPROM :raises AssertionError: if the EEPROM reads back any byte different from the specified value :raises ConnectionError: if the programmer fails to acknowledge any byte send to it """ dummy_file = FillBytes(fill_byte, length) try: self.verify_file(dummy_file, start_address=start_address) except AssertionError as err: raise AssertionError( f"EEPROM is not filled with 0x{fill_byte:02X}", err.args[1] ) from err def verify_file( self, file: Union[str, BinaryIO], start_address: int = 0x00 ) -> None: """ Read the contents of the EEPROM and compare each byte to a specified file. The number of bytes to read is determined by the file length. 
:param file: path to a file that will be opened in binary mode :param start_address: offset in the EEPROM where to start reading :raises AssertionError: if the EEPROM memory differs in any byte from the specified file :raises ConnectionError: if the programmer fails to acknowledge any byte send to it """ if isinstance(file, str): with open(file, "rb") as bin_file: raw_content = bin_file.read() else: raw_content = file.read() eeprom_content = self.read_contents(start_address, len(raw_content)) if raw_content != eeprom_content: diff = diff_func( format_hex(raw_content), format_hex(eeprom_content), fromfile=repr(file), tofile="EEPROM Contents", ) raise AssertionError("Contents do not match input file", "\n".join(diff)) def read_contents(self, start_address: int, length: int) -> bytes: """ Read a section of memory from the EEPROM into a bytearray. This method automatically handles the unescapeing of control sequences for the programmer. :param start_address: offset to the beginning of the memory block to read :param length: size of the memory block to read :return: bytes object with the specified memory block contents :raises AssertionError: if the programmer repors to have sent more bytes than were received or if an invalid escape sequence is received :raises ConnectionError: if the programmer fails to acknowledge any byte send to it """ self.acknowledged_write("r ".encode("ascii")) self.acknowledged_write(f"{start_address}".encode("ascii")) self.acknowledged_write(f"{length}".encode("ascii")) content = b"" progress = tqdm(desc="reading contents", total=length) while True: char = self.read() if char == EEProgrammer.end_char: progress.close() response = self.readline() bytes_read = int(response) escaped_content = EEProgrammer.escape_file_contents(content) if bytes_read != len(escaped_content): raise AssertionError( f"read {len(escaped_content)} bytes, expected {bytes_read}" ) return content if char == EEProgrammer.esc_char: char = self.read() if char not in [EEProgrammer.end_char, EEProgrammer.esc_char]: raise AssertionError("Invalid escape sequence received") content += char progress.update() def flush_read_buffer(self) -> bytes: """ Read the serial buffer until empty and return the result :return: bytes object with the current contents of the serial read buffer """ content = b"" while True: line = self.readline() if line == b"": break content += line return content @staticmethod def escape_file_contents(contents: bytes) -> bytearray: """ Escape each control character in a sequence of bytes. :param contents: the sequence to escape (bytes or list of ints) :return: bytes of the escaped sequence """ escaped_bytes = bytearray() for byte in contents: escaped_bytes += EEProgrammer.escape_byte(bytes([byte])) return escaped_bytes @staticmethod def escape_byte(byte: bytes) -> bytes: """ Escape a single byte. For non-control characters this is a no-op. Control characters (``EEProgrammer.end_char`` and ``EEProgrammer.esc_char``) will be prefixed with an additional ``EEProgrammer.end_char``. 
:param byte: the byte to escape :return: a ``bytes`` object containing the escaped input """ if byte in [EEProgrammer.end_char, EEProgrammer.esc_char]: return EEProgrammer.esc_char + byte return byte def main() -> None: """ Entrypoint for the CLI application """ parser = argparse.ArgumentParser(description="Write to or read from an EEPROM") parser.add_argument( "-p", "--port", help="serial port to the programmer", required=True ) parser.add_argument("-f", "--file", help="binary file to write") parser.add_argument( "-b", "--baud", type=int, default=115200, help="baudrate for communication with the programmer", ) parser.add_argument("-s", "--size", type=int, help="size of the EEPROM in bytes") group = parser.add_mutually_exclusive_group()
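Putting the `EEProgrammer` methods above together, a typical write-and-verify session might look like the sketch below. The port name, baudrate and file name are placeholders, and the module is assumed to be importable as `eepro` (the name used in the `format_hex` doctest); since `EEProgrammer` subclasses `serial.Serial`, the constructor arguments and context-manager behaviour are pySerial's.

from eepro import EEProgrammer   # assumes the module above is importable as `eepro`

# '/dev/ttyUSB0', 115200 and 'rom.bin' are placeholders, not values from the source.
with EEProgrammer('/dev/ttyUSB0', 115200, timeout=2) as programmer:
    programmer.reset()                                  # restart the Arduino into a known state
    programmer.write_file('rom.bin', start_address=0)   # escaped, per-byte acknowledged transfer
    programmer.verify_file('rom.bin', start_address=0)  # AssertionError with a hex diff on mismatch
    programmer.fill(b'\xff', length=256, start_address=0x100)          # blank a 256-byte region
    programmer.check_filled(b'\xff', length=256, start_address=0x100)  # confirm the blanking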
211, s.hash['a.com']['cert1'][211], s.hash['a.com']['cert2'][211]), setup.call_line('p', "--not-up=311 --fail-publish", 301, s.hash['a.com']['cert1'][301]), setup.call_line('p', "--not-up=311 --fail-publish", 311, s.hash['a.com']['cert1'][311]), ] assert cl[22:] == calls # posthook renewed again, publish succeeds, delete fails sleep(sleep_time) setup.clear_state(prog) ptime6 = "{:%s}".format(prog.timenow) prog.ttl = 0 s.renew_a() prog.renewed_domains = [ 'a.com' ] retval = config.read(prog) assert retval == Prog.RetVal.ok retval = datafile.read(prog) assert retval == Prog.RetVal.ok retval = datafile.check_data(prog) assert retval == Prog.RetVal.ok for g in prog.data.groups: g.target.api.command = [ str(s.bin / 'dns'), '--fail-delete' ] retval = dane.process_data(prog) assert retval == Prog.RetVal.continue_failure retval = datafile.write_posthook(prog) assert retval == Prog.RetVal.ok with open(str(prog.datafile), 'r') as file: df = file.read().splitlines() df_lines = [] for k in df[2:]: df_lines += [ shlex.split(k) ] # note: the 2xx records are recreated because the certs were # renewed again lines = [ setup.prehook_line(s, cwd, 'a.com', 'cert1.pem', 1), setup.prehook_line(s, cwd, 'a.com', 'chain1.pem', 1), setup.prehook_line(s, cwd, 'a.com', 'fullchain1.pem', 1), setup.prehook_line(s, cwd, 'a.com', 'privkey1.pem', 1), [ 'a.com', '201', '12725', 'tcp', 'a.com', ptime6, '0', s.hash['a.com']['cert2'][201] ], [ 'a.com', '211', '12725', 'tcp', 'a.com', ptime6, '0', s.hash['a.com']['cert2'][211] ], [ 'a.com', '301', '12725', 'tcp', 'a.com', ptime6, '0', s.hash['a.com']['cert2'][301] ], [ 'a.com', '311', '12725', 'tcp', 'a.com', ptime6, '0', s.hash['a.com']['cert2'][311] ], [ 'a.com', 'delete', '311', '12725', 'tcp', 'a.com', ptime, '5', s.hash['a.com']['cert2'][311] ], [ 'a.com', 'delete', '311', '12725', 'tcp', 'a.com', ptime2, '4', s.hash['a.com']['cert3'][311] ], ] assert sorted(df_lines) == sorted(lines) with open(str(s.data / 'calls'), 'r') as file: cl = file.read().splitlines() calls = [ setup.call_line('d', "--fail-delete", 311, s.hash['a.com']['cert2'][311]), setup.call_line('d', "--fail-delete", 311, s.hash['a.com']['cert3'][311]), setup.call_line('p', "--fail-delete", 201, s.hash['a.com']['cert2'][201]), setup.call_line('p', "--fail-delete", 211, s.hash['a.com']['cert2'][211]), setup.call_line('p', "--fail-delete", 301, s.hash['a.com']['cert2'][301]), setup.call_line('p', "--fail-delete", 311, s.hash['a.com']['cert2'][311]), ] assert cl[30:] == calls # posthook ttl passed, 311 not up sleep(sleep_time) setup.clear_state(prog) ptime7 = "{:%s}".format(prog.timenow) prog.ttl = 0 retval = config.read(prog) assert retval == Prog.RetVal.ok retval = datafile.read(prog) assert retval == Prog.RetVal.ok retval = datafile.check_data(prog) assert retval == Prog.RetVal.ok for g in prog.data.groups: g.target.api.command = [ str(s.bin / 'dns'), '--not-up=311' ] retval = dane.process_data(prog) assert retval == Prog.RetVal.ok retval = datafile.write_posthook(prog) assert retval == Prog.RetVal.ok with open(str(prog.datafile), 'r') as file: df = file.read().splitlines() df_lines = [] for k in df[2:]: df_lines += [ shlex.split(k) ] lines = [ setup.prehook_line(s, cwd, 'a.com', 'cert1.pem', 1), setup.prehook_line(s, cwd, 'a.com', 'chain1.pem', 1), setup.prehook_line(s, cwd, 'a.com', 'fullchain1.pem', 1), setup.prehook_line(s, cwd, 'a.com', 'privkey1.pem', 1), # [ 'a.com', '201', '12725', 'tcp', 'a.com', ptime6, '0', # s.hash['a.com']['cert2'][201] ], # [ 'a.com', '211', '12725', 'tcp', 'a.com', ptime6, 
'0', # s.hash['a.com']['cert2'][211] ], # [ 'a.com', '301', '12725', 'tcp', 'a.com', ptime6, '0', # s.hash['a.com']['cert2'][301] ], [ 'a.com', '311', '12725', 'tcp', 'a.com', ptime6, '0', s.hash['a.com']['cert2'][311] ], [ 'a.com', 'delete', '311', '12725', 'tcp', 'a.com', ptime, '6', s.hash['a.com']['cert2'][311] ], [ 'a.com', 'delete', '311', '12725', 'tcp', 'a.com', ptime2, '5', s.hash['a.com']['cert3'][311] ], ] assert sorted(df_lines) == sorted(lines) with open(str(s.data / 'calls'), 'r') as file: cl = file.read().splitlines() calls = [ setup.call_line('d', "--not-up=311", 311, s.hash['a.com']['cert2'][311]), setup.call_line('d', "--not-up=311", 311, s.hash['a.com']['cert3'][311]), setup.call_line('d', "--not-up=311", 201, s.hash['a.com']['cert1'][201], s.hash['a.com']['cert2'][201]), setup.call_line('d', "--not-up=311", 211, s.hash['a.com']['cert1'][211], s.hash['a.com']['cert2'][211]), setup.call_line('d', "--not-up=311", 301, s.hash['a.com']['cert1'][301], s.hash['a.com']['cert2'][301]), setup.call_line('d', "--not-up=311", 311, s.hash['a.com']['cert1'][311], s.hash['a.com']['cert2'][311]), ] assert cl[36:] == calls # posthook (no renewal, ttl passed) sleep(sleep_time) setup.clear_state(prog) prog.ttl = 0 retval = config.read(prog) assert retval == Prog.RetVal.ok retval = datafile.read(prog) assert retval == Prog.RetVal.ok retval = datafile.check_data(prog) assert retval == Prog.RetVal.ok for g in prog.data.groups: g.target.api.command = [ str(s.bin / 'dns') ] retval = dane.process_data(prog) assert retval == Prog.RetVal.ok retval = datafile.write_posthook(prog) assert retval == Prog.RetVal.ok assert not prog.datafile.exists() with open(str(s.data / 'calls'), 'r') as file: cl = file.read().splitlines() calls = [ setup.call_line('d', "", 311, s.hash['a.com']['cert2'][311]), setup.call_line('d', "", 311, s.hash['a.com']['cert3'][311]), setup.call_line('d', "", 311, s.hash['a.com']['cert1'][311], s.hash['a.com']['cert2'][311]), ] assert cl[42:] == calls with open(str(s.data / 'user'), 'r') as f: whodata = f.read().splitlines() if os.getuid() == 0: for wd in whodata: assert wd[0:7] == "nobody:" else: name = pwd.getpwuid(os.getuid()).pw_name namelen = len(name) for wd in whodata: assert wd[0:namelen] == name def test_10_multi_renewal_2xx_up_soft_fail(): s = setup.Init(keep=True) if os.getuid() != 0: uid = None gid = None prog = setup.create_state_obj(s, config=s.config2) else: uid = pwd.getpwnam('nobody').pw_uid gid = None prog = setup.create_state_obj(s, config=s.config5) cwd = Path.cwd() with prog.log: assert not prog.log.has_errors() retval = config.read(prog) assert retval == Prog.RetVal.ok api = setup.create_api_exec_obj(str(s.bin / 'dns'), uid=uid, gid=gid) t_a1 = setup.create_tlsa_obj('201', '12725', 'tcp', 'a.com') t_a2 = setup.create_tlsa_obj('211', '12725', 'tcp', 'a.com') t_a3 = setup.create_tlsa_obj('301', '12725', 'tcp', 'a.com') t_a4 = setup.create_tlsa_obj('311', '12725', 'tcp', 'a.com') ta = setup.create_target_obj('a.com', api, [], [t_a1, t_a2, t_a3, t_a4]) assert prog.target_list == [ta] assert prog.dane_domain_directories == {} assert prog.renewed_domains == [] retval = dane.init_dane_directory(prog) assert retval == Prog.RetVal.ok assert s.dane.exists() assert Path(s.dane / 'a.com').exists() assert Path(s.dane / 'b.com').exists() assert Path(s.dane / 'c.com').exists() assert os.readlink(str(s.dane / 'a.com' / 'cert.pem')) == \ '../../le/live/a.com/cert.pem' assert os.readlink(str(s.dane / 'a.com' / 'chain.pem')) == \ '../../le/live/a.com/chain.pem' assert 
os.readlink(str(s.dane / 'a.com' / 'fullchain.pem')) == \ '../../le/live/a.com/fullchain.pem' assert os.readlink(str(s.dane / 'a.com' / 'privkey.pem')) == \ '../../le/live/a.com/privkey.pem' retval = dane.live_to_archive(prog) assert retval == Prog.RetVal.ok rd = {'a.com': [ 'cert.pem', 'chain.pem', 'privkey.pem', 'fullchain.pem' ]} assert len(prog.dane_domain_directories) == 1 for d in prog.dane_domain_directories: assert sorted(prog.dane_domain_directories[d]) == sorted(rd[d]) assert os.readlink(str(s.dane / 'a.com' / 'cert.pem')) == \ '../../le/archive/a.com/cert1.pem' assert os.readlink(str(s.dane / 'a.com' / 'chain.pem')) == \ '../../le/archive/a.com/chain1.pem' assert os.readlink(str(s.dane / 'a.com' / 'fullchain.pem')) == \ '../../le/archive/a.com/fullchain1.pem' assert os.readlink(str(s.dane / 'a.com' / 'privkey.pem')) == \ '../../le/archive/a.com/privkey1.pem' retval = datafile.write_prehook(prog) assert retval == Prog.RetVal.ok with open(str(prog.datafile), 'r') as file: df = file.read().splitlines() df_lines = [] for k in df[2:]: df_lines += [ shlex.split(k) ] lines = [ setup.prehook_line(s, cwd, 'a.com', 'cert1.pem', 0), setup.prehook_line(s, cwd, 'a.com', 'chain1.pem', 0), setup.prehook_line(s, cwd, 'a.com', 'fullchain1.pem', 0), setup.prehook_line(s, cwd, 'a.com', 'privkey1.pem', 0), ] assert sorted(df_lines) == sorted(lines) # posthook (certs renewed) -- publish succeeds setup.clear_state(prog) ptime = "{:%s}".format(prog.timenow) retval = config.read(prog) assert retval == Prog.RetVal.ok s.renew_a() prog.renewed_domains = [ 'a.com' ] retval = datafile.read(prog) assert retval == Prog.RetVal.ok retval = datafile.check_data(prog) assert retval == Prog.RetVal.ok for g in prog.data.groups: g.target.api.command = [ str(s.bin / 'dns'), '--is-up=201:211' ] retval = dane.process_data(prog) assert retval == Prog.RetVal.ok retval = datafile.write_posthook(prog) assert retval == Prog.RetVal.ok with open(str(prog.datafile), 'r') as file: df = file.read().splitlines() df_lines = [] for k in df[2:]: df_lines += [ shlex.split(k) ] lines = [ setup.prehook_line(s, cwd, 'a.com', 'cert1.pem', 1), setup.prehook_line(s, cwd, 'a.com', 'chain1.pem', 1), setup.prehook_line(s, cwd, 'a.com', 'fullchain1.pem', 1), setup.prehook_line(s, cwd, 'a.com', 'privkey1.pem', 1), [ 'a.com', '301', '12725', 'tcp', 'a.com', ptime, '0', s.hash['a.com']['cert2'][301] ], [ 'a.com', '311', '12725', 'tcp', 'a.com', ptime, '0', s.hash['a.com']['cert2'][311] ], ] assert sorted(df_lines) == sorted(lines) with open(str(s.data / 'calls'), 'r') as file: cl = file.read().splitlines() calls = [ setup.call_line('p', "--is-up=201:211", 201, s.hash['a.com']['cert2'][201]), setup.call_line('p', "--is-up=201:211", 211, s.hash['a.com']['cert2'][211]), setup.call_line('p', "--is-up=201:211", 301, s.hash['a.com']['cert2'][301]), setup.call_line('p', "--is-up=201:211", 311, s.hash['a.com']['cert2'][311]), ] assert cl == calls # posthook renewal again, publish succeeds, delete fails sleep(sleep_time) setup.clear_state(prog) ptime2 = "{:%s}".format(prog.timenow) prog.ttl = 0 s.renew_a() prog.renewed_domains = [ 'a.com' ] retval = config.read(prog) assert retval == Prog.RetVal.ok retval = datafile.read(prog) assert retval == Prog.RetVal.ok retval = datafile.check_data(prog) assert retval == Prog.RetVal.ok for g in prog.data.groups: g.target.api.command = [ str(s.bin / 'dns'), '--fail-delete' ] retval = dane.process_data(prog) assert retval == Prog.RetVal.continue_failure retval = datafile.write_posthook(prog) assert retval == 
Prog.RetVal.ok with open(str(prog.datafile), 'r') as file: df = file.read().splitlines() df_lines = [] for k in df[2:]: df_lines += [ shlex.split(k) ] lines = [ setup.prehook_line(s, cwd, 'a.com', 'cert1.pem', 1), setup.prehook_line(s, cwd, 'a.com', 'chain1.pem', 1), setup.prehook_line(s, cwd, 'a.com', 'fullchain1.pem', 1), setup.prehook_line(s, cwd, 'a.com', 'privkey1.pem', 1), [ 'a.com', '201', '12725', 'tcp', 'a.com', ptime2, '0', s.hash['a.com']['cert2'][201] ], [ 'a.com', '211', '12725', 'tcp', 'a.com', ptime2, '0', s.hash['a.com']['cert2'][211] ], [ 'a.com', '301', '12725', 'tcp', 'a.com', ptime2, '0', s.hash['a.com']['cert3'][301] ], [ 'a.com', '311', '12725', 'tcp', 'a.com', ptime2, '0', s.hash['a.com']['cert3'][311] ], [ 'a.com', 'delete', '301', '12725', 'tcp', 'a.com', ptime, '1', s.hash['a.com']['cert2'][301] ], [ 'a.com', 'delete', '311', '12725', 'tcp', 'a.com', ptime, '1', s.hash['a.com']['cert2'][311] ], ] assert sorted(df_lines) == sorted(lines) with open(str(s.data / 'calls'), 'r') as file: cl = file.read().splitlines() calls = [ setup.call_line('p', "--fail-delete", 201, s.hash['a.com']['cert3'][201]), setup.call_line('p', "--fail-delete", 211, s.hash['a.com']['cert3'][211]), setup.call_line('p', "--fail-delete", 301, s.hash['a.com']['cert3'][301]), setup.call_line('p', "--fail-delete", 311, s.hash['a.com']['cert3'][311]), setup.call_line('d', "--fail-delete", 301, s.hash['a.com']['cert2'][301]), setup.call_line('d', "--fail-delete", 311, s.hash['a.com']['cert2'][311]), ] assert cl[4:] == calls # posthook renewal again, publish fails, delete fails sleep(sleep_time) setup.clear_state(prog) ptime3 = "{:%s}".format(prog.timenow) prog.ttl = 0 s.renew_a() prog.renewed_domains = [ 'a.com'
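Each posthook scenario in these tests repeats the same pipeline: clear state, optionally renew the certificates, read the config and data file, point the target at the fake `dns` executable, process the data and rewrite the posthook file. The helper below is an illustrative consolidation of that sequence (it is not part of the test suite; it only reuses the fixtures and modules already exercised above).

def run_posthook(prog, s, dns_args, renew=False, expect=Prog.RetVal.ok):
    # Illustrative consolidation of the steps the tests above repeat.
    setup.clear_state(prog)
    prog.ttl = 0
    if renew:
        s.renew_a()
        prog.renewed_domains = ['a.com']
    assert config.read(prog) == Prog.RetVal.ok
    assert datafile.read(prog) == Prog.RetVal.ok
    assert datafile.check_data(prog) == Prog.RetVal.ok
    for g in prog.data.groups:
        g.target.api.command = [str(s.bin / 'dns')] + dns_args
    assert dane.process_data(prog) == expect
    assert datafile.write_posthook(prog) == Prog.RetVal.ok

# e.g. the "--fail-delete" step above would then read:
# run_posthook(prog, s, ['--fail-delete'], renew=True, expect=Prog.RetVal.continue_failure)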
# mcu.py # a comprehensive library for communicating with the MATE OQBot. # please read docs/packet_structure.md and docs/command_list.md for more information. # there is a CLI for testing available at mcu_cli.py. read docs/mcu_cli.md for usage. import serial import struct import time import threading from queue import Queue, Full from mcu_lib.packets import * from mcu_lib.command_constants import * BYTESTRING_ZERO = chr(0x00).encode('latin') FORWARD_PACKET_SIZE = 8 RETURN_PACKET_SIZE = 10 MAX_QUEUE_SIZE = 512 # bs ("byte-string") # converts an int into a single character bytestring def bs(v: int) -> bytes: return chr(v).encode('latin') # converts a signed int8 to unsigned int8 def to_unsigned_int8(signed_int8: int) -> int: assert -128 <= signed_int8 <= 127 neg = signed_int8 < 0 if neg: return signed_int8 + 0xFF return signed_int8 class MCUInterface: """ MCUInterface - Layer for interfacing between a Python program and the microcontroller over Serial. Attributes: latest_accel : List[float] - latest acceleration values, [x, y, z]. latest_gyro : List[float] - latest angular velocity values, [x, y, z]. latest_voltage : float - latest voltage on the 12V rail. latest_temp : float - latest temperature measured within the electronics box. test_queue : Queue[TestPacket] - Queue to access all received packets of TestPacket type. ok_queue : Queue[OKPacket] - Queue to access all received packets of OKPacket type. accel_queue : Queue[AccelPacket] - Queue to access all received packets of AccelPacket type. gyro_queue : Queue[GyroPacket] - Queue to access all received packets of GyroPacket type. volt_temp_queue : Queue[VoltageTemperaturePacket] - Queue to access all received packets of that type. Methods: get_port() -> str returns the interface's serial.Serial object's port. get_baud() -> str returns the interface's serial.Serial Object's baudrate. open_serial() opens the integrated serial interface. close_serial() closes the integrated serial interface. cmd_{PACKET_COMMAND}() refer to docs/command_list.md. most should be self-explanatory. """ def __init__(self, port: str, baud: int = 230400, close_on_startup: bool = True, refresh_rate: int = 1440, max_read: int = 16): """ Default and only constructor for the MCUInterface class. :param port: Port to connect to. :param baud: Baudrate to connect at. :param close_on_startup: Whether to close the serial object at startup, so it can be opened later. :param refresh_rate: Number of times per seconds to refresh the serial cache for packets. :param max_read: Maximum number of bytes for the serial object to read at once. Used to mitigate strange pySerial behavior. 
""" self.__serial = serial.Serial(port, baud, timeout=0, write_timeout=0) self.__queue = Queue() self.__fetch_thread = threading.Thread(target=self.__read_serial) self.__parse_thread = threading.Thread(target=self.__parse_serial) self.__write_thread = threading.Thread(target=self.__write_packets) self.__refresh_time = 1 / refresh_rate self.__thread_enable = False self.__init_queues() self.__write_queue = Queue() self.__wait_half_byte_time = 4 / baud self.latest_accel = [0.0, 0.0, 0.0] self.latest_gyro = [0.0, 0.0, 0.0] self.latest_linear_accel = [0.0, 0.0, 0.0] self.latest_orientation = [0.0, 0.0, 0.0] self.latest_voltage = 0 self.latest_temp = 0 self.latest_motor_status: MotorStatusPacket = MotorStatusPacket((0, 0, 0, 0), 0, 0) self.current_calibration = IMUCalibrationPacket((0, 0, 0, 0), 0) self.read_size = max_read if close_on_startup: self.__serial.close() def __init_queues(self): self.test_queue = Queue(MAX_QUEUE_SIZE) self.ok_queue = Queue(MAX_QUEUE_SIZE) self.accel_queue = Queue(MAX_QUEUE_SIZE) self.gyro_queue = Queue(MAX_QUEUE_SIZE) self.linear_accel_queue = Queue(MAX_QUEUE_SIZE) self.orientation_queue = Queue(MAX_QUEUE_SIZE) self.volt_temp_queue = Queue(MAX_QUEUE_SIZE) self.motor_queue = Queue(MAX_QUEUE_SIZE) def get_port(self) -> str: """ Get the integrated serial object's port. :return: the port. """ return self.__serial.port def get_baud(self) -> int: """ Get the integrated serial object's baudrate. :return: the baudrate. """ return self.__serial.baudrate def open_serial(self): """ Opens the integrated serial's connection to the microcontroller. """ self.__serial.open() while not self.__serial.is_open: time.sleep(0.01) self.__thread_enable = True self.__fetch_thread.start() self.__parse_thread.start() self.__write_thread.start() def __read_serial(self): while self.__thread_enable: try: byte_string = self.__serial.read(size=self.read_size) for byte in byte_string: self.__queue.put(bs(byte)) except serial.SerialException: pass time.sleep(self.__refresh_time) def __parse_serial(self): while self.__thread_enable: if self.__queue.qsize() >= RETURN_PACKET_SIZE: packet = self.__read_packet() if packet: self.__parse_packet(packet) time.sleep(self.__refresh_time) def close_serial(self): """ Closes the integrated serial's connection to the microcontroller. 
""" self.__thread_enable = False self.__fetch_thread.join() self.__parse_thread.join() self.__write_thread.join() self.__serial.close() def __read_packet(self): # returns a generic ReturnPacket next_byte = self.__queue.get() while next_byte != bs(RETURN_HEADER) and self.__queue.qsize() >= RETURN_PACKET_SIZE: next_byte = self.__queue.get() if self.__queue.qsize() < RETURN_PACKET_SIZE - 1: return packet_data = [] for i in range(RETURN_PACKET_SIZE - 1): # 0x1 to 0x9 packet_data.append(self.__queue.get()) if packet_data[-1] != bs(RETURN_FOOTER): # invalid packet return packet = ReturnPacket(packet_data) return packet def __write_packets(self): while self.__thread_enable: try: pkt = self.__write_queue.get() self.__serial.write(pkt) time.sleep(self.__wait_half_byte_time) except serial.SerialTimeoutException: pass def __empty_queue(self, queue): while not queue.empty(): queue.get() def __parse_packet(self, packet: ReturnPacket): data_bs = packet.data[0] + packet.data[1] + packet.data[2] + packet.data[3] if not packet: return # let's all pretend this was a Python 3.10+ match/case statement if packet.cmd == bs(RETURN_TEST): # test version = int.from_bytes(packet.data[0], 'big') contents = (packet.data[1] + packet.data[2] + packet.data[3]).decode('latin') valid = contents == "pog" if self.test_queue.qsize() <= MAX_QUEUE_SIZE - 1: self.test_queue.put_nowait(TestPacket(valid, version, contents, packet.timestamp)) elif packet.cmd == bs(RETURN_OK): # OK og_cmd = int.from_bytes(packet.og_cmd, 'big') og_param = int.from_bytes(packet.og_param, 'big') success = int.from_bytes(packet.param, 'big') > 0 if self.ok_queue.qsize() <= MAX_QUEUE_SIZE - 1: self.ok_queue.put_nowait(OKPacket(og_cmd, og_param, success, packet.timestamp)) elif packet.cmd == bs(RETURN_ACCELEROMETER): # accel axis = int.from_bytes(packet.param, 'big') // AXIS_DIVISOR value = struct.unpack('f', data_bs)[0] if self.accel_queue.qsize() <= MAX_QUEUE_SIZE - 1: self.accel_queue.put_nowait(AccelPacket(axis, value, packet.timestamp)) self.latest_accel[axis] = value elif packet.cmd == bs(RETURN_GYROSCOPE): # gyro axis = int.from_bytes(packet.param, 'big') // AXIS_DIVISOR value = struct.unpack('f', data_bs)[0] if self.gyro_queue.qsize() <= MAX_QUEUE_SIZE - 1: self.gyro_queue.put_nowait(GyroPacket(axis, value, packet.timestamp)) self.latest_gyro[axis] = value elif packet.cmd == bs(RETURN_LINEAR_ACCEL): axis = int.from_bytes(packet.param, 'big') // AXIS_DIVISOR value = struct.unpack('f', data_bs)[0] if self.linear_accel_queue.qsize() <= MAX_QUEUE_SIZE - 1: self.linear_accel_queue.put_nowait(LinearAccelPacket(axis, value, packet.timestamp)) self.latest_linear_accel[axis] = value elif packet.cmd == bs(RETURN_ORIENTATION): axis = int.from_bytes(packet.param, 'big') // AXIS_DIVISOR value = struct.unpack('f', data_bs)[0] if self.orientation_queue.qsize() <= MAX_QUEUE_SIZE - 1: self.orientation_queue.put_nowait(OrientationPacket(axis, value, packet.timestamp)) self.latest_orientation[axis] = value elif packet.cmd == bs(RETURN_VOLT_TEMP): # temp/volt temp, volts = struct.unpack('HH', data_bs) temp /= 100 volts /= 100 self.latest_temp = temp self.latest_voltage = volts if self.volt_temp_queue.qsize() <= MAX_QUEUE_SIZE - 1: self.volt_temp_queue.put_nowait(VoltageTemperaturePacket(volts, temp, packet.timestamp)) elif packet.cmd == bs(RETURN_MOTOR): # motor status servo = struct.unpack('b', packet.param)[0] motors = struct.unpack('bbbb', data_bs) packet = MotorStatusPacket(motors, servo, packet.timestamp) if self.motor_queue.qsize() <= MAX_QUEUE_SIZE - 1: 
self.motor_queue.put_nowait(packet) self.latest_motor_status = packet elif packet.cmd == bs(RETURN_IMU_CALIBRATIONS): calibrations = struct.unpack('bbbb', data_bs) self.current_calibration = IMUCalibrationPacket(calibrations, packet.timestamp) else: print(f"Invalid packet received! Command: {packet.cmd}, Param: {packet.param}, Data: {packet.data}") def __send_packet(self, cmd: int, param: int, data: bytes): assert len(data) == 4, "data is not 4 bytes long!" packet = bs(FORWARD_HEADER) + bs(cmd) + bs(param) + data + bs(FORWARD_FOOTER) assert len(packet) == 8, "packet is not 8 bytes long!" self.__write_queue.put(packet) def cmd_test(self): self.__send_packet(COMMAND_TEST, PARAM_TEST_SYSTEM, BYTESTRING_ZERO * 4) def cmd_halt(self): self.__send_packet(COMMAND_HALT, PARAM_ZERO, BYTESTRING_ZERO * 4) def cmd_setMotorMicroseconds(self, motor: int, microseconds: int): assert 0 <= motor <= NUM_MOTORS - 1, f"There are only {NUM_MOTORS} motors" data = bs(microseconds // 0xFF) + bs(microseconds % 0xFF) + BYTESTRING_ZERO * 2 self.__send_packet(COMMAND_SET_MOTOR_MICROSECONDS, motor, data) def cmd_setMotorCalibrated(self, motor: int, percent: int): assert 0 <= motor <= NUM_MOTORS - 1, f"There are only {NUM_MOTORS} motors" assert -100 <= percent <= 100, "Calibrated % Range is [-100, 100]" data = bs(to_unsigned_int8(percent)) + BYTESTRING_ZERO * 3 self.__send_packet(COMMAND_SET_MOTOR_CALIBRATED, motor, data) def cmd_setMotorCalibration(self, motor: int, value: int): assert 0 <= motor <= NUM_MOTORS - 1, f"There are only {NUM_MOTORS} motors" assert 0 <= value <= 4000, "Calibration range is [0, 4000] where 1000 is normal" data = bs(value // 0xFF) + bs(value % 0xFF) + BYTESTRING_ZERO * 2 self.__send_packet(COMMAND_SET_MOTOR_CALIBRATION, motor, data) def cmd_getIMU(self, device: int): assert device == PARAM_ACCEL or device == PARAM_GYRO \ or device == PARAM_LINEAR_ACCEL or device == PARAM_ORIENTATION, "invalid device!" self.__send_packet(COMMAND_GET_IMU, device, BYTESTRING_ZERO * 4) def cmd_setAccelSettings(self, range: int, divisor: int, use_deprecated=False): if not use_deprecated: print("cmd_setAccelSettings is deprecated as of $VERSION=2!") return assert 0 <= range <= 3, "invalid range!" assert 1 <= divisor <= 0xFF, "invalid divisor!" data = bs(range) + bs(divisor) + BYTESTRING_ZERO * 2 self.__send_packet(COMMAND_SET_ACCEL_SETTINGS, PARAM_ACCEL, data) def cmd_setGyroSettings(self, range: int, divisor: int, use_deprecated=False): if not use_deprecated: print("cmd_setGyroSettings is deprecated as of $VERSION=2!") return assert 0 <= range <= 3, "invalid range!" assert 1 <= divisor <= 0xFF, "invalid divisor!" data = bs(range) + bs(divisor) + BYTESTRING_ZERO * 2 self.__send_packet(COMMAND_SET_GYRO_SETTINGS, PARAM_GYRO, data) def cmd_getVoltageAndTemperature(self): self.__send_packet(COMMAND_GET_VOLT_TEMP, PARAM_VOLT_TEMP, BYTESTRING_ZERO * 4) def cmd_setVoltageCalibration(self, calibration: float): self.__send_packet(COMMAND_SET_VOLTAGE_CALIBRATION, PARAM_VOLT_TEMP, struct.pack('f', calibration)) def cmd_setAutoReport(self, device, enabled: bool, delay: int): assert device == PARAM_ACCEL or device == PARAM_GYRO or device == PARAM_VOLT_TEMP \ or device == PARAM_ORIENTATION or device == PARAM_LINEAR_ACCEL, "invalid device!" assert 0 <= delay <= 0xFFFF, "invalid
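A hedged usage sketch for `MCUInterface` follows. The import path and the port name are assumptions (the header comment only names the file `mcu.py` and the package `mcu_lib`); the short sleep simply gives the reader/parser threads time to handle replies before the queues and `latest_*` attributes are inspected.

import time
from mcu_lib.mcu import MCUInterface   # assumed module path; only `mcu.py` is named above

mcu = MCUInterface("/dev/ttyACM0", baud=230400)   # port is a placeholder
mcu.open_serial()                      # starts the fetch/parse/write threads
mcu.cmd_test()                         # request a TestPacket
mcu.cmd_getVoltageAndTemperature()     # request a VoltageTemperaturePacket

time.sleep(0.5)                        # give the background threads time to parse replies
if not mcu.test_queue.empty():
    print(mcu.test_queue.get())        # TestPacket(valid, version, contents, timestamp)
print(mcu.latest_voltage, mcu.latest_temp)

mcu.cmd_setMotorCalibrated(0, 50)      # motor 0 at +50% of its calibrated range
mcu.cmd_halt()
mcu.close_serial()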
#!/usr/bin/env python3 from concurrent import futures import time import math import logging import argparse import sys import threading import time import grpc import rospy import roslib.message import ros_pb2 as ros_pb import ros_pb2_grpc as ros_grpc class image_view_outputServicer(ros_grpc.image_view_outputServicer): def __init__(self): self.pub = None self.Msg = roslib.message.get_message_class('sensor_msgs/Image') def Publish(self, pb_msg, context): if self.pub == None: self.pub = rospy.Publisher('/image_view/output', self.Msg, queue_size=10) ros_msg = self.Msg() ros_msg.header.seq = pb_msg.header.seq ros_msg.header.stamp.secs = pb_msg.header.stamp.secs ros_msg.header.stamp.nsecs = pb_msg.header.stamp.nsecs ros_msg.header.frame_id = pb_msg.header.frame_id ros_msg.height = pb_msg.height ros_msg.width = pb_msg.width ros_msg.encoding = pb_msg.encoding ros_msg.is_bigendian = pb_msg.is_bigendian ros_msg.step = pb_msg.step ros_msg.data = pb_msg.data self.pub.publish(ros_msg) return ros_pb.Empty() def Subscribe(self, request, context): c = {'unsubscribed': False} ros_messages = [] def callback(ros_msg): ros_messages.append(ros_msg) subscription = rospy.Subscriber('/image_view/output', self.Msg, callback) def on_rpc_done(): c['unsubscribed'] = True print("Attempting to regain servicer thread...", c) subscription.unregister() context.add_callback(on_rpc_done) while not c['unsubscribed']: while ros_messages: ros_msg = ros_messages.pop(0) pb_msg = ros_pb.sensor_msgs.Image() pb_msg.header.seq = ros_msg.header.seq pb_msg.header.stamp.secs = ros_msg.header.stamp.secs pb_msg.header.stamp.nsecs = ros_msg.header.stamp.nsecs pb_msg.header.frame_id = ros_msg.header.frame_id pb_msg.height = ros_msg.height pb_msg.width = ros_msg.width pb_msg.encoding = ros_msg.encoding pb_msg.is_bigendian = ros_msg.is_bigendian pb_msg.step = ros_msg.step pb_msg.data = ros_msg.data yield pb_msg rospy.sleep(0.01) class image_view_parameter_descriptionsServicer(ros_grpc.image_view_parameter_descriptionsServicer): def __init__(self): self.pub = None self.Msg = roslib.message.get_message_class('dynamic_reconfigure/ConfigDescription') def Publish(self, pb_msg, context): if self.pub == None: self.pub = rospy.Publisher('/image_view/parameter_descriptions', self.Msg, queue_size=10) ros_msg = self.Msg() for pb_msg_ in pb_msg.groups: ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/Group')() ros_msg_.name = pb_msg_.name ros_msg_.type = pb_msg_.type for pb_msg__ in pb_msg_.parameters: ros_msg__ = roslib.message.get_message_class('dynamic_reconfigure/ParamDescription')() ros_msg__.name = pb_msg__.name ros_msg__.type = pb_msg__.type ros_msg__.level = pb_msg__.level ros_msg__.description = pb_msg__.description ros_msg__.edit_method = pb_msg__.edit_method ros_msg_.parameters.append(ros_msg__) ros_msg_.parent = pb_msg_.parent ros_msg_.id = pb_msg_.id ros_msg.groups.append(ros_msg_) for pb_msg_ in pb_msg.max.bools: ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/BoolParameter')() ros_msg_.name = pb_msg_.name ros_msg_.value = pb_msg_.value ros_msg.max.bools.append(ros_msg_) for pb_msg_ in pb_msg.max.ints: ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/IntParameter')() ros_msg_.name = pb_msg_.name ros_msg_.value = pb_msg_.value ros_msg.max.ints.append(ros_msg_) for pb_msg_ in pb_msg.max.strs: ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/StrParameter')() ros_msg_.name = pb_msg_.name ros_msg_.value = pb_msg_.value ros_msg.max.strs.append(ros_msg_) for pb_msg_ in 
pb_msg.max.doubles: ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/DoubleParameter')() ros_msg_.name = pb_msg_.name ros_msg_.value = pb_msg_.value ros_msg.max.doubles.append(ros_msg_) for pb_msg_ in pb_msg.max.groups: ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/GroupState')() ros_msg_.name = pb_msg_.name ros_msg_.state = pb_msg_.state ros_msg_.id = pb_msg_.id ros_msg_.parent = pb_msg_.parent ros_msg.max.groups.append(ros_msg_) for pb_msg_ in pb_msg.min.bools: ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/BoolParameter')() ros_msg_.name = pb_msg_.name ros_msg_.value = pb_msg_.value ros_msg.min.bools.append(ros_msg_) for pb_msg_ in pb_msg.min.ints: ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/IntParameter')() ros_msg_.name = pb_msg_.name ros_msg_.value = pb_msg_.value ros_msg.min.ints.append(ros_msg_) for pb_msg_ in pb_msg.min.strs: ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/StrParameter')() ros_msg_.name = pb_msg_.name ros_msg_.value = pb_msg_.value ros_msg.min.strs.append(ros_msg_) for pb_msg_ in pb_msg.min.doubles: ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/DoubleParameter')() ros_msg_.name = pb_msg_.name ros_msg_.value = pb_msg_.value ros_msg.min.doubles.append(ros_msg_) for pb_msg_ in pb_msg.min.groups: ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/GroupState')() ros_msg_.name = pb_msg_.name ros_msg_.state = pb_msg_.state ros_msg_.id = pb_msg_.id ros_msg_.parent = pb_msg_.parent ros_msg.min.groups.append(ros_msg_) for pb_msg_ in pb_msg.dflt.bools: ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/BoolParameter')() ros_msg_.name = pb_msg_.name ros_msg_.value = pb_msg_.value ros_msg.dflt.bools.append(ros_msg_) for pb_msg_ in pb_msg.dflt.ints: ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/IntParameter')() ros_msg_.name = pb_msg_.name ros_msg_.value = pb_msg_.value ros_msg.dflt.ints.append(ros_msg_) for pb_msg_ in pb_msg.dflt.strs: ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/StrParameter')() ros_msg_.name = pb_msg_.name ros_msg_.value = pb_msg_.value ros_msg.dflt.strs.append(ros_msg_) for pb_msg_ in pb_msg.dflt.doubles: ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/DoubleParameter')() ros_msg_.name = pb_msg_.name ros_msg_.value = pb_msg_.value ros_msg.dflt.doubles.append(ros_msg_) for pb_msg_ in pb_msg.dflt.groups: ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/GroupState')() ros_msg_.name = pb_msg_.name ros_msg_.state = pb_msg_.state ros_msg_.id = pb_msg_.id ros_msg_.parent = pb_msg_.parent ros_msg.dflt.groups.append(ros_msg_) self.pub.publish(ros_msg) return ros_pb.Empty() def Subscribe(self, request, context): c = {'unsubscribed': False} ros_messages = [] def callback(ros_msg): ros_messages.append(ros_msg) subscription = rospy.Subscriber('/image_view/parameter_descriptions', self.Msg, callback) def on_rpc_done(): c['unsubscribed'] = True print("Attempting to regain servicer thread...", c) subscription.unregister() context.add_callback(on_rpc_done) while not c['unsubscribed']: while ros_messages: ros_msg = ros_messages.pop(0) pb_msg = ros_pb.dynamic_reconfigure.ConfigDescription() for ros_msg_ in ros_msg.groups: pb_msg_ = ros_pb.dynamic_reconfigure.Group() pb_msg_.name = ros_msg_.name pb_msg_.type = ros_msg_.type for ros_msg__ in ros_msg_.parameters: pb_msg__ = ros_pb.dynamic_reconfigure.ParamDescription() pb_msg__.name = ros_msg__.name pb_msg__.type = 
ros_msg__.type pb_msg__.level = ros_msg__.level pb_msg__.description = ros_msg__.description pb_msg__.edit_method = ros_msg__.edit_method pb_msg_.parameters.append(pb_msg__) pb_msg_.parent = ros_msg_.parent pb_msg_.id = ros_msg_.id pb_msg.groups.append(pb_msg_) for ros_msg_ in ros_msg.max.bools: pb_msg_ = ros_pb.dynamic_reconfigure.BoolParameter() pb_msg_.name = ros_msg_.name pb_msg_.value = ros_msg_.value pb_msg.max.bools.append(pb_msg_) for ros_msg_ in ros_msg.max.ints: pb_msg_ = ros_pb.dynamic_reconfigure.IntParameter() pb_msg_.name = ros_msg_.name pb_msg_.value = ros_msg_.value pb_msg.max.ints.append(pb_msg_) for ros_msg_ in ros_msg.max.strs: pb_msg_ = ros_pb.dynamic_reconfigure.StrParameter() pb_msg_.name = ros_msg_.name pb_msg_.value = ros_msg_.value pb_msg.max.strs.append(pb_msg_) for ros_msg_ in ros_msg.max.doubles: pb_msg_ = ros_pb.dynamic_reconfigure.DoubleParameter() pb_msg_.name = ros_msg_.name pb_msg_.value = ros_msg_.value pb_msg.max.doubles.append(pb_msg_) for ros_msg_ in ros_msg.max.groups: pb_msg_ = ros_pb.dynamic_reconfigure.GroupState() pb_msg_.name = ros_msg_.name pb_msg_.state = ros_msg_.state pb_msg_.id = ros_msg_.id pb_msg_.parent = ros_msg_.parent pb_msg.max.groups.append(pb_msg_) for ros_msg_ in ros_msg.min.bools: pb_msg_ = ros_pb.dynamic_reconfigure.BoolParameter() pb_msg_.name = ros_msg_.name pb_msg_.value = ros_msg_.value pb_msg.min.bools.append(pb_msg_) for ros_msg_ in ros_msg.min.ints: pb_msg_ = ros_pb.dynamic_reconfigure.IntParameter() pb_msg_.name = ros_msg_.name pb_msg_.value = ros_msg_.value pb_msg.min.ints.append(pb_msg_) for ros_msg_ in ros_msg.min.strs: pb_msg_ = ros_pb.dynamic_reconfigure.StrParameter() pb_msg_.name = ros_msg_.name pb_msg_.value = ros_msg_.value pb_msg.min.strs.append(pb_msg_) for ros_msg_ in ros_msg.min.doubles: pb_msg_ = ros_pb.dynamic_reconfigure.DoubleParameter() pb_msg_.name = ros_msg_.name pb_msg_.value = ros_msg_.value pb_msg.min.doubles.append(pb_msg_) for ros_msg_ in ros_msg.min.groups: pb_msg_ = ros_pb.dynamic_reconfigure.GroupState() pb_msg_.name = ros_msg_.name pb_msg_.state = ros_msg_.state pb_msg_.id = ros_msg_.id pb_msg_.parent = ros_msg_.parent pb_msg.min.groups.append(pb_msg_) for ros_msg_ in ros_msg.dflt.bools: pb_msg_ = ros_pb.dynamic_reconfigure.BoolParameter() pb_msg_.name = ros_msg_.name pb_msg_.value = ros_msg_.value pb_msg.dflt.bools.append(pb_msg_) for ros_msg_ in ros_msg.dflt.ints: pb_msg_ = ros_pb.dynamic_reconfigure.IntParameter() pb_msg_.name = ros_msg_.name pb_msg_.value = ros_msg_.value pb_msg.dflt.ints.append(pb_msg_) for ros_msg_ in ros_msg.dflt.strs: pb_msg_ = ros_pb.dynamic_reconfigure.StrParameter() pb_msg_.name = ros_msg_.name pb_msg_.value = ros_msg_.value pb_msg.dflt.strs.append(pb_msg_) for ros_msg_ in ros_msg.dflt.doubles: pb_msg_ = ros_pb.dynamic_reconfigure.DoubleParameter() pb_msg_.name = ros_msg_.name pb_msg_.value = ros_msg_.value pb_msg.dflt.doubles.append(pb_msg_) for ros_msg_ in ros_msg.dflt.groups: pb_msg_ = ros_pb.dynamic_reconfigure.GroupState() pb_msg_.name = ros_msg_.name pb_msg_.state = ros_msg_.state pb_msg_.id = ros_msg_.id pb_msg_.parent = ros_msg_.parent pb_msg.dflt.groups.append(pb_msg_) yield pb_msg rospy.sleep(0.01) class image_view_parameter_updatesServicer(ros_grpc.image_view_parameter_updatesServicer): def __init__(self): self.pub = None self.Msg = roslib.message.get_message_class('dynamic_reconfigure/Config') def Publish(self, pb_msg, context): if self.pub == None: self.pub = rospy.Publisher('/image_view/parameter_updates', self.Msg, queue_size=10) ros_msg = self.Msg() 
for pb_msg_ in pb_msg.bools: ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/BoolParameter')() ros_msg_.name = pb_msg_.name ros_msg_.value = pb_msg_.value ros_msg.bools.append(ros_msg_) for pb_msg_ in pb_msg.ints: ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/IntParameter')() ros_msg_.name = pb_msg_.name ros_msg_.value = pb_msg_.value ros_msg.ints.append(ros_msg_) for pb_msg_ in pb_msg.strs: ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/StrParameter')() ros_msg_.name = pb_msg_.name ros_msg_.value = pb_msg_.value ros_msg.strs.append(ros_msg_) for pb_msg_ in pb_msg.doubles: ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/DoubleParameter')() ros_msg_.name = pb_msg_.name ros_msg_.value = pb_msg_.value ros_msg.doubles.append(ros_msg_) for pb_msg_ in pb_msg.groups: ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/GroupState')() ros_msg_.name = pb_msg_.name ros_msg_.state = pb_msg_.state ros_msg_.id = pb_msg_.id ros_msg_.parent = pb_msg_.parent ros_msg.groups.append(ros_msg_) self.pub.publish(ros_msg) return ros_pb.Empty() def Subscribe(self, request, context): c = {'unsubscribed': False} ros_messages = [] def callback(ros_msg): ros_messages.append(ros_msg) subscription = rospy.Subscriber('/image_view/parameter_updates', self.Msg, callback) def on_rpc_done(): c['unsubscribed'] = True print("Attempting to regain servicer thread...", c) subscription.unregister() context.add_callback(on_rpc_done) while not c['unsubscribed']: while ros_messages: ros_msg = ros_messages.pop(0) pb_msg = ros_pb.dynamic_reconfigure.Config() for ros_msg_ in ros_msg.bools: pb_msg_ = ros_pb.dynamic_reconfigure.BoolParameter() pb_msg_.name = ros_msg_.name pb_msg_.value = ros_msg_.value pb_msg.bools.append(pb_msg_) for ros_msg_ in ros_msg.ints: pb_msg_ = ros_pb.dynamic_reconfigure.IntParameter() pb_msg_.name = ros_msg_.name pb_msg_.value = ros_msg_.value pb_msg.ints.append(pb_msg_) for ros_msg_ in ros_msg.strs: pb_msg_ = ros_pb.dynamic_reconfigure.StrParameter() pb_msg_.name = ros_msg_.name pb_msg_.value = ros_msg_.value pb_msg.strs.append(pb_msg_) for ros_msg_ in ros_msg.doubles: pb_msg_ = ros_pb.dynamic_reconfigure.DoubleParameter() pb_msg_.name = ros_msg_.name pb_msg_.value = ros_msg_.value pb_msg.doubles.append(pb_msg_) for ros_msg_ in ros_msg.groups: pb_msg_ = ros_pb.dynamic_reconfigure.GroupState() pb_msg_.name = ros_msg_.name pb_msg_.state = ros_msg_.state pb_msg_.id = ros_msg_.id pb_msg_.parent = ros_msg_.parent pb_msg.groups.append(pb_msg_) yield pb_msg rospy.sleep(0.01) class rosoutServicer(ros_grpc.rosoutServicer): def __init__(self): self.pub = None self.Msg = roslib.message.get_message_class('rosgraph_msgs/Log') def Publish(self, pb_msg, context): if self.pub == None: self.pub = rospy.Publisher('/rosout', self.Msg, queue_size=10) ros_msg = self.Msg() ros_msg.header.seq = pb_msg.header.seq ros_msg.header.stamp.secs = pb_msg.header.stamp.secs ros_msg.header.stamp.nsecs = pb_msg.header.stamp.nsecs ros_msg.header.frame_id = pb_msg.header.frame_id ros_msg.level = pb_msg.level ros_msg.name = pb_msg.name ros_msg.msg = pb_msg.msg ros_msg.file = pb_msg.file ros_msg.function = pb_msg.function ros_msg.line = pb_msg.line for pb_msg_ in pb_msg.topics: ros_msg.topics.append(pb_msg_) self.pub.publish(ros_msg) return ros_pb.Empty() def Subscribe(self, request, context): c = {'unsubscribed': False} ros_messages = [] def callback(ros_msg): ros_messages.append(ros_msg) subscription = rospy.Subscriber('/rosout', self.Msg, callback) def 
on_rpc_done(): c['unsubscribed'] = True print("Attempting to regain servicer thread...", c) subscription.unregister() context.add_callback(on_rpc_done) while not c['unsubscribed']: while ros_messages: ros_msg = ros_messages.pop(0) pb_msg = ros_pb.rosgraph_msgs.Log() pb_msg.header.seq = ros_msg.header.seq pb_msg.header.stamp.secs = ros_msg.header.stamp.secs pb_msg.header.stamp.nsecs = ros_msg.header.stamp.nsecs pb_msg.header.frame_id = ros_msg.header.frame_id pb_msg.level = ros_msg.level pb_msg.name = ros_msg.name pb_msg.msg = ros_msg.msg pb_msg.file = ros_msg.file pb_msg.function = ros_msg.function pb_msg.line = ros_msg.line for ros_msg_ in ros_msg.topics: pb_msg.topics.append(ros_msg_) yield pb_msg rospy.sleep(0.01) class rosout_aggServicer(ros_grpc.rosout_aggServicer): def __init__(self): self.pub = None self.Msg = roslib.message.get_message_class('rosgraph_msgs/Log') def Publish(self, pb_msg, context): if self.pub == None: self.pub = rospy.Publisher('/rosout_agg', self.Msg, queue_size=10) ros_msg = self.Msg() ros_msg.header.seq = pb_msg.header.seq ros_msg.header.stamp.secs = pb_msg.header.stamp.secs ros_msg.header.stamp.nsecs = pb_msg.header.stamp.nsecs ros_msg.header.frame_id = pb_msg.header.frame_id ros_msg.level = pb_msg.level ros_msg.name = pb_msg.name ros_msg.msg = pb_msg.msg ros_msg.file = pb_msg.file ros_msg.function = pb_msg.function ros_msg.line = pb_msg.line for pb_msg_ in pb_msg.topics: ros_msg.topics.append(pb_msg_) self.pub.publish(ros_msg) return ros_pb.Empty() def Subscribe(self, request, context): c = {'unsubscribed': False} ros_messages = [] def callback(ros_msg): ros_messages.append(ros_msg) subscription = rospy.Subscriber('/rosout_agg', self.Msg, callback) def on_rpc_done(): c['unsubscribed'] = True print("Attempting to regain servicer thread...", c) subscription.unregister() context.add_callback(on_rpc_done) while not c['unsubscribed']: while ros_messages: ros_msg = ros_messages.pop(0) pb_msg = ros_pb.rosgraph_msgs.Log() pb_msg.header.seq = ros_msg.header.seq pb_msg.header.stamp.secs = ros_msg.header.stamp.secs pb_msg.header.stamp.nsecs = ros_msg.header.stamp.nsecs pb_msg.header.frame_id = ros_msg.header.frame_id pb_msg.level = ros_msg.level pb_msg.name = ros_msg.name pb_msg.msg = ros_msg.msg pb_msg.file = ros_msg.file pb_msg.function = ros_msg.function pb_msg.line = ros_msg.line for ros_msg_ in
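The servicers above all share the same Publish/Subscribe shape; what this excerpt does not show is how they are attached to a gRPC server. The sketch below follows the usual grpcio pattern and assumes the generated `ros_pb2_grpc` module (imported here as `ros_grpc`) exposes the standard `add_<Service>Servicer_to_server` helpers named after grpc's code-generation convention; the node name and port are placeholders.

def create_server(address='[::]:50051'):
    # Register a couple of the servicers defined above; the helper names are an
    # assumption based on grpc's codegen convention.
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    ros_grpc.add_image_view_outputServicer_to_server(image_view_outputServicer(), server)
    ros_grpc.add_rosoutServicer_to_server(rosoutServicer(), server)
    server.add_insecure_port(address)
    return server

def main():
    rospy.init_node('grpc_bridge')     # node name is a placeholder
    server = create_server()
    server.start()
    server.wait_for_termination()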
form.cleaned_data['comment_author'] comment_author_email = form.cleaned_data['comment_author_email'] article_id = form.cleaned_data['article'] article = Article.objects.get(pk=article_id) parent_id = form.cleaned_data['parent'] if not parent_id: parent = None else: parent = Comment.objects.get(pk=parent_id) parent_comment_date = parent.comment_date.strftime(timeformat) comment_author_ip = form.cleaned_data['comment_author_ip'] comm = Comment.objects.create(comment=comment, user=user, comment_author=comment_author, comment_author_email=comment_author_email, article=article, parent=parent, comment_author_ip=comment_author_ip, comment_status=comment_status) comm.save() if comm.comment_status == '1': article.comment_count = article.comment_count + 1 article.save(update_fields=['comment_count']) comment_time = comm.comment_date.strftime(timeformat) if user and parent: jsondata = {"id": comm.pk, "comment": comm.comment, "comment_author": comm.comment_author, "comment_date": comment_time, "avatar": str(user.avatar), "parent_author": parent.comment_author, "parent_comment_date": parent_comment_date, "parent_comment": parent.comment, "comment_status": comment_status, "comment_count": article.comment_count, "comment_author_ip": comm.comment_author_ip, "article_guid": article.guid, "article_title": article.title, "comment_author_email": comm.comment_author_email, "article_id": article.pk} elif parent: jsondata = {"id": comm.pk, "comment": comm.comment, "comment_author": comm.comment_author, "comment_date": comment_time, "avatar": "/static/assets/avatars/avatar.png", "parent_author": parent.comment_author, "parent_comment_date": parent_comment_date, "parent_comment": parent.comment, "comment_status": comment_status, "comment_count": article.comment_count, "comment_author_ip": comm.comment_author_ip, "article_guid": article.guid, "article_title": article.title, "comment_author_email": comm.comment_author_email, "article_id": article.pk} elif user: jsondata = {"id": comm.pk, "comment": comm.comment, "comment_author": comm.comment_author, "comment_date": comment_time, "avatar": str(user.avatar), "comment_status": comment_status, "comment_count": article.comment_count, "comment_author_ip": comm.comment_author_ip, "article_guid": article.guid, "article_title": article.title, "comment_author_email": comm.comment_author_email, "article_id": article.pk} else: jsondata = {"id": comm.pk, "comment": comm.comment, "comment_author": comm.comment_author, "comment_date": comment_time, "avatar": "/static/assets/avatars/avatar.png", "comment_status": comment_status, "comment_count": article.comment_count, "comment_author_ip": comm.comment_author_ip, "article_guid": article.guid, "article_title": article.title, "comment_author_email": comm.comment_author_email, "article_id": article.pk} return HttpResponse(json.dumps(jsondata), content_type="application/json") else: context['msg'] = "表单错误" return render(req, 'das/msg.html', context) else: context['msg'] = '非法操作' return render(req, 'das/msg.html', context) @permission_required('das.access_dashboard', login_url='/login.html') @login_required(login_url="/login") def comment_show(req, pindex, comment_status): context = {} context['sitemeta'] = settings.SITEMETA context['active'] = 'comment_show' if comment_status == "": comment_status = '0' if req.method == "POST": form = CommentStatusForm(req.POST) if form.is_valid(): comment_status = form.cleaned_data['comment_status'] user = req.session.get('user') if user.is_superuser or user.has_perm('das.access_dashboard'): if 
comment_status == '0': comments = Comment.objects.all().order_by('-comment_date') else: comments = Comment.objects.filter(comment_status=comment_status).order_by('-comment_date') else: if comment_status == '0': comments = Comment.objects.filter(user=user).order_by('-comment_date') else: comments = Comment.objects.filter(user=user, comment_status=comment_status).order_by('-comment_date') paginator = Paginator(comments, 10) if pindex == '': pindex = '1' try: page = paginator.page(int(pindex)) except: pindex = int(pindex) - 1 page = paginator.page(int(pindex)) context['pages'] = page context['comments'] = comments context['comment_status'] = comment_status return render(req, 'das/comment-show.html', context) @login_required(login_url="/login") def my_comment(req, pindex, comment_status): context = {} context['sitemeta'] = settings.SITEMETA context['active'] = 'my_comment' if comment_status == "": comment_status = '0' if req.method == "POST": form = CommentStatusForm(req.POST) if form.is_valid(): comment_status = form.cleaned_data['comment_status'] user = req.session.get('user') if comment_status == '0': comments = Comment.objects.filter(user=user).order_by('-comment_date') else: comments = Comment.objects.filter(user=user, comment_status=comment_status).order_by('-comment_date') paginator = Paginator(comments, 10) if pindex == '': pindex = '1' try: page = paginator.page(int(pindex)) except: pindex = int(pindex) - 1 page = paginator.page(int(pindex)) context['pages'] = page context['comments'] = comments context['comment_status'] = comment_status return render(req, 'das/my-comment.html', context) @permission_required('das.access_dashboard', login_url='/login.html') @login_required(login_url="/login") def comment_audit(req, comment_id, oper): context = {} # context['sitemeta'] = settings.SITEMETA context['active'] = 'comment_show' comment = Comment.objects.get(pk=comment_id) if comment: if oper == "reject": # ret = comment_del(req, comment_id) comment.comment_status = "2" comment.save() context['msg'] = "评论驳回成功" context['result'] = "success" elif oper == "accept": comment.comment_status = "1" comment.save() article = comment.article article.comment_count = article.comment_count + 1 article.save() context['msg'] = "评论审核通过" context['result'] = "success" else: context['msg'] = "评论不存在" context['result'] = "failure" return HttpResponse(json.dumps(context), content_type="application/json") @login_required(login_url="/login") def comment_del(req, comment_id): context = {} # context['sitemeta'] = settings.SITEMETA context['active'] = 'comment_show' user = req.session.get('user') if user.is_superuser or user.has_perm('das.access_dashboard'): comment = Comment.objects.get(pk=comment_id) else: comment = Comment.objects.get(user=user, pk=comment_id) if comment: article = comment.article comment.delete() article.comment_count = article.comment_set.filter(comment_status=1).count() article.save() context['result'] = "success" context['msg'] = "评论删除成功" context['comment_count'] = article.comment_count else: context['result'] = "failure" context['msg'] = "评论不存在" return HttpResponse(json.dumps(context), content_type="application/json") @login_required(login_url="/login") def like_article(req, article_id, user_id): context = {} # context['sitemeta'] = settings.SITEMETA article = Article.objects.get(pk=article_id) user = User.objects.get(pk=user_id) try: likearticleship = Likearticleship.objects.filter(user=user, article=article) if likearticleship: likearticleship[0].delete() else: likearticleship = 
Likearticleship.objects.create(user=user, article=article) likearticleship.save() context['result'] = 'success' except: context['result'] = 'failure' finally: return HttpResponse(json.dumps(context), content_type="application/json") @login_required(login_url="/login") def like_comment(req, comment_id, user_id): context = {} # context['sitemeta'] = settings.SITEMETA comment = Comment.objects.get(pk=comment_id) user = User.objects.get(pk=user_id) try: likecommentship = Likecommentship.objects.filter(user=user, comment=comment) if likecommentship: likecommentship[0].delete() else: likecommentship = Likecommentship.objects.create(user=user, comment=comment) likecommentship.save() context['result'] = 'success' except: context['result'] = 'failure' finally: return HttpResponse(json.dumps(context), content_type="application/json") @permission_required('das.access_all', login_url='/login.html') @login_required(login_url="/login") def set_site(req): context = {} context['sitemeta'] = settings.SITEMETA context['active'] = 'set_site' site_meta = None try: site_meta = Sitemeta.objects.all()[0] except: site_meta = None if req.method == 'POST': form = SiteMetaForm(req.POST) if form.is_valid(): site_name = form.cleaned_data['site_name'] description = form.cleaned_data['description'] keywords = form.cleaned_data['keywords'] author = form.cleaned_data['author'] title = form.cleaned_data['title'] subtitle = form.cleaned_data['subtitle'] announcement = form.cleaned_data['announcement'] favicon = form.cleaned_data['favicon'] head_background_img = form.cleaned_data['head_background_img'] author_img = form.cleaned_data['author_img'] head_code = form.cleaned_data['head_code'] foot_code = form.cleaned_data['foot_code'] is_weibo = form.cleaned_data['is_weibo'] if not is_weibo: is_weibo = 0 wb_uid = form.cleaned_data['wb_uid'] is_wechat = form.cleaned_data['is_wechat'] if not is_wechat: is_wechat = 0 wechat_qrcode = form.cleaned_data['wechat_qrcode'] is_qqgroup = form.cleaned_data['is_qqgroup'] if not is_qqgroup: is_qqgroup = 0 qqgroup_url = form.cleaned_data['qqgroup_url'] is_twitter = form.cleaned_data['is_twitter'] if not is_twitter: is_twitter = 0 twitter_id = form.cleaned_data['twitter_id'] if not site_meta: site_meta = Sitemeta.objects.create(site_name=site_name, description=description, keywords=keywords, author=author, title=title, subtitle=subtitle, announcement=announcement, favicon=favicon, head_background_img=head_background_img, author_img=author_img, head_code=head_code, foot_code=foot_code, is_weibo=is_weibo, wb_uid=wb_uid, is_wechat=is_wechat, wechat_qrcode=wechat_qrcode, is_qqgroup=is_qqgroup, qqgroup_url=qqgroup_url, is_twitter=is_twitter, twitter_id=twitter_id) else: site_meta.site_name = site_name site_meta.description = description site_meta.keywords = keywords site_meta.author = author site_meta.title = title site_meta.subtitle = subtitle site_meta.announcement = announcement site_meta.favicon = favicon site_meta.head_background_img = head_background_img site_meta.author_img = author_img site_meta.head_code = head_code site_meta.foot_code = foot_code site_meta.is_weibo = is_weibo site_meta.wb_uid = wb_uid site_meta.is_wechat = is_wechat site_meta.wechat_qrcode = wechat_qrcode site_meta.is_qqgroup = is_qqgroup site_meta.qqgroup_url = qqgroup_url site_meta.is_twitter = is_twitter site_meta.twitter_id = twitter_id site_meta.save() context['sitemeta'] = cache_sitemeta(change=True) settings.SITEMETA = site_meta context['site_meta'] = site_meta return render(req, 'das/site_edit.html', context) else: 
context['msg'] = "表单错误" return render(req, 'das/msg.html', context) context['site_meta'] = site_meta return render(req, 'das/site_edit.html', context) @login_required(login_url="/login") def upload_page(req, pindex): context = {} context['sitemeta'] = settings.SITEMETA user = req.session.get('user') if user.is_superuser or user.has_perm('das.access_dashboard'): medias = Media.objects.all().order_by("-upload_date") else: medias = Media.objects.filter(file_author=user).order_by("-upload_date") paginator = Paginator(medias, 10) active = 'active' if pindex == '': pindex = '1' active = None try: page = paginator.page(int(pindex)) except: pindex = int(pindex) - 1 page = paginator.page(int(pindex)) context['pages'] = page context['media'] = medias context['active'] = active return render(req, 'das/dropzone.html', context) @login_required(login_url="/login") def media_view(req, pindex): context = {} context['sitemeta'] = settings.SITEMETA context['active'] = 'media_all' user = req.session.get('user') if user.is_superuser or user.has_perm('das.access_dashboard'): medias = Media.objects.all().order_by("-upload_date") else: medias = Media.objects.filter(file_author=user).order_by("-upload_date") paginator = Paginator(medias, 10) if pindex == '': pindex = '1' try: page = paginator.page(int(pindex)) except: pindex = int(pindex) - 1 page = paginator.page(int(pindex)) context['pages'] = page context['media'] = medias return render(req, 'das/media-show.html', context) @login_required(login_url="/login") def media_modify(req, id): context = {} context['sitemeta'] = settings.SITEMETA context['active'] = 'media_all' user = req.session.get('user') if user.is_superuser or user.has_perm('das.access_dashboard'): media = Media.objects.get(pk=id) else: media = Media.objects.get(pk=id, file_author=user) if not media: context['msg'] = "媒体不存在" return render(req, 'das/msg.html', context) if req.method == 'POST': form = MediaForm(req.POST) if form.is_valid(): description = form.cleaned_data['description'] alt = form.cleaned_data['alt'] o_file_name = form.cleaned_data['o_file_name'] media.o_file_name = o_file_name media.description = description media.alt = alt try: media.save() context['msg'] = '修改成功' return redirect('/das/media/list/1', context) except: context['msg'] = '失败,请重试' return render(req, 'das/msg.html', context) else: context['msg'] = "表单错误" return render(req, 'das/msg.html', context) context['media'] = media return render(req, 'das/media-modify.html', context) @login_required(login_url="/login") def media_del(req, id): context = {} # context['sitemeta'] = settings.SITEMETA context['active'] = 'media_all' user = req.session.get('user') if user.is_superuser or user.has_perm('das.access_dashboard'): try: media = Media.objects.get(pk=id) except: context['msg'] = "媒体不存在" context['result'] = 'failure' return HttpResponse(json.dumps(context), content_type="application/json") else: try: media = Media.objects.get(pk=id, file_author=user) except: context['msg'] = "媒体不存在" context['result'] = 'failure' return HttpResponse(json.dumps(context), content_type="application/json") try: os.remove(media.file_local_path) media.delete() context['msg'] = '媒体删除成功' context['result'] = 'success' return HttpResponse(json.dumps(context), content_type="application/json") except: context['msg'] = "媒体删除失败,请重试" context['result'] = 'failure' return HttpResponse(json.dumps(context), content_type="application/json") @login_required(login_url="/login") def media_del_by_o_name(req, o_name): context = {} # context['sitemeta'] = 
settings.SITEMETA user = req.session.get('user') if user.is_superuser or user.has_perm('das.access_dashboard'): try: media = Media.objects.filter(o_file_name=o_name).order_by("-upload_date")[0] except: context['msg'] = "媒体不存在" context['result'] = 'failure' return HttpResponse(json.dumps(context), content_type="application/json") else: try: media = Media.objects.filter(o_file_name=o_name, file_author=user).order_by("-upload_date")[0] except: context['msg'] = "媒体不存在" context['result'] = 'failure' return HttpResponse(json.dumps(context), content_type="application/json") try: os.remove(media.file_local_path) media.delete() context['msg'] = '媒体删除成功' context['result'] = 'success' return HttpResponse(json.dumps(context), content_type="application/json") except: context['msg'] = "媒体删除失败,请重试" context['result'] = 'failure' return HttpResponse(json.dumps(context), content_type="application/json") @login_required(login_url="/login") def media_new_view(req): context = {} context['sitemeta'] = settings.SITEMETA context['active'] = 'media_new' return render(req, 'das/media-add.html', context) # def group_view(req): # context = {} # context['sitemeta'] = settings.SITEMETA # context['active'] = 'group_mgr' # groups = None # try: # groups = Group.objects.all() # except: # groups = None # finally: # context['groups'] = groups # return render(req, 'das/groups-list.html', context) # # # def group_add_view(req): # context = {} # context['sitemeta'] = settings.SITEMETA # context['active'] = 'group_mgr' # if req.method == 'POST': # form = GroupForm(req.POST) # if form.is_valid(): # name = form.cleaned_data['name'] # perm = form.cleaned_data['perm'] # members = form.cleaned_data['members'] # try: # group = Group.objects.get(name=name) # context['msg'] = "用户组已经存在" # return render(req, 'das/msg.html', context) # except: # group = Group.objects.create(name=name) # group.save() # permission = Permission.objects.get(codename=perm) # group.permissions.add(permission) # if members != '': # members = members.split(',') # for username in members: #
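# A minimal sketch (not part of the original module) of the "toggle like" idiom used by like_article() and
# like_comment() above: look the relationship up, delete it if it exists, otherwise create it, and answer with
# a JSON result. The helper names toggle_like/like_comment_view are assumptions for illustration only;
# Comment, User and Likecommentship are the models used by like_comment() above and are assumed to be
# imported as in that module. The sketch also narrows the bare except to the errors actually expected.
import json

from django.http import HttpResponse


def toggle_like(user, comment, ship_model):
    """Create the like relation if missing, delete it if present. Returns True when a like now exists."""
    ship, created = ship_model.objects.get_or_create(user=user, comment=comment)
    if not created:
        # the relation already existed, so this call means "unlike"
        ship.delete()
    return created


def like_comment_view(req, comment_id, user_id):
    # hypothetical thin wrapper mirroring like_comment() above
    try:
        comment = Comment.objects.get(pk=comment_id)
        user = User.objects.get(pk=user_id)
        toggle_like(user, comment, Likecommentship)
        context = {'result': 'success'}
    except (Comment.DoesNotExist, User.DoesNotExist):
        context = {'result': 'failure'}
    return HttpResponse(json.dumps(context), content_type="application/json")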
{'member': member, 'plans': plans }, context_instance=RequestContext(request)) @login_required def supply_and_demand(request, from_date, to_date): try: from_date = datetime.datetime(*time.strptime(from_date, '%Y_%m_%d')[0:5]).date() to_date = datetime.datetime(*time.strptime(to_date, '%Y_%m_%d')[0:5]).date() except ValueError: raise Http404 sdtable = supply_demand_table(from_date, to_date) return render_to_response('distribution/supply_demand.html', { 'from_date': from_date, 'to_date': to_date, 'sdtable': sdtable, 'tabnav': "distribution/tabnav.html", 'tabs': 'D', }, context_instance=RequestContext(request)) @login_required def dojo_supply_and_demand(request, from_date, to_date): from_datestring = from_date to_datestring = to_date try: from_date = datetime.datetime(*time.strptime(from_date, '%Y_%m_%d')[0:5]).date() to_date = datetime.datetime(*time.strptime(to_date, '%Y_%m_%d')[0:5]).date() except ValueError: raise Http404 columns = sd_columns(from_date, to_date) return render_to_response('distribution/dojo_supply_demand.html', { 'from_date': from_date, 'to_date': to_date, 'from_datestring': from_datestring, 'to_datestring': to_datestring, 'columns': columns, 'column_count': len(columns), 'tabnav': "distribution/tabnav.html", 'tabs': 'D', }, context_instance=RequestContext(request)) @login_required def json_supply_and_demand(request, from_date, to_date): try: from_date = datetime.datetime(*time.strptime(from_date, '%Y_%m_%d')[0:5]).date() to_date = datetime.datetime(*time.strptime(to_date, '%Y_%m_%d')[0:5]).date() except ValueError: raise Http404 rows = supply_demand_rows(from_date, to_date) count = len(rows) try: range = request.META["HTTP_RANGE"] range = range.split("=")[1] range = range.split("-") range_start = int(range[0]) range_end = int(range[1]) except KeyError: range_start = 0 range_end = count if count < range_end: range_end = count rows = rows[range_start:range_end + 1] data = simplejson.dumps(rows) response = HttpResponse(data, mimetype="text/json-comment-filtered") response['Cache-Control'] = 'no-cache' response['Content-Range'] = "".join(["items ", str(range_start), "-", str(range_end), "/", str(count + 1)]) return response @login_required def dojo_income(request, from_date, to_date): from_datestring = from_date to_datestring = to_date try: from_date = datetime.datetime(*time.strptime(from_date, '%Y_%m_%d')[0:5]).date() to_date = datetime.datetime(*time.strptime(to_date, '%Y_%m_%d')[0:5]).date() except ValueError: raise Http404 income_table = suppliable_demand(from_date, to_date) total_net = sum(row[len(row)-1] for row in income_table.rows) total_gross = sum(row[len(row)-2] for row in income_table.rows) columns = sd_columns(from_date, to_date) return render_to_response('distribution/dojo_income.html', { 'from_date': from_date, 'to_date': to_date, 'from_datestring': from_datestring, 'to_datestring': to_datestring, 'total_net': total_net, 'total_gross': total_gross, 'columns': columns, 'column_count': len(columns) + 2, 'tabnav': "distribution/tabnav.html", 'tabs': 'D', }, context_instance=RequestContext(request)) @login_required def json_income(request, from_date, to_date): try: from_date = datetime.datetime(*time.strptime(from_date, '%Y_%m_%d')[0:5]).date() to_date = datetime.datetime(*time.strptime(to_date, '%Y_%m_%d')[0:5]).date() except ValueError: raise Http404 rows = json_income_rows(from_date, to_date) count = len(rows) try: range = request.META["HTTP_RANGE"] range = range.split("=")[1] range = range.split("-") range_start = int(range[0]) range_end = int(range[1]) except 
KeyError: range_start = 0 range_end = count if count < range_end: range_end = count rows = rows[range_start:range_end + 1] data = simplejson.dumps(rows) response = HttpResponse(data, mimetype="text/json-comment-filtered") response['Cache-Control'] = 'no-cache' response['Content-Range'] = "".join(["items ", str(range_start), "-", str(range_end), "/", str(count + 1)]) return response @login_required def income(request, from_date, to_date): try: from_date = datetime.datetime(*time.strptime(from_date, '%Y_%m_%d')[0:5]).date() to_date = datetime.datetime(*time.strptime(to_date, '%Y_%m_%d')[0:5]).date() except ValueError: raise Http404 income_table = suppliable_demand(from_date, to_date) total_net = sum(row[len(row)-1] for row in income_table.rows) total_gross = sum(row[len(row)-2] for row in income_table.rows) return render_to_response('distribution/income.html', { 'from_date': from_date, 'to_date': to_date, 'total_net': total_net, 'total_gross': total_gross, 'income_table': income_table, }, context_instance=RequestContext(request)) @login_required def member_supply_and_demand(request, from_date, to_date, member_id): try: member = Party.objects.get(pk=member_id) except Party.DoesNotExist: raise Http404 try: from_date = datetime.datetime(*time.strptime(from_date, '%Y_%m_%d')[0:5]).date() to_date = datetime.datetime(*time.strptime(to_date, '%Y_%m_%d')[0:5]).date() except ValueError: raise Http404 sdtable = supply_demand_table(from_date, to_date, member) plan_type = "Production" if member.is_customer(): plan_type = "Consumption" #import pdb; pdb.set_trace() return render_to_response('distribution/member_plans.html', { 'from_date': from_date, 'to_date': to_date, 'sdtable': sdtable, 'member': member, 'plan_type': plan_type, 'tabnav': "distribution/tabnav.html", }, context_instance=RequestContext(request)) @login_required def dojo_member_plans(request, from_date, to_date, member_id): try: member = Party.objects.get(pk=member_id) except Party.DoesNotExist: raise Http404 from_datestring = from_date to_datestring = to_date try: from_date = datetime.datetime(*time.strptime(from_date, '%Y_%m_%d')[0:5]).date() to_date = datetime.datetime(*time.strptime(to_date, '%Y_%m_%d')[0:5]).date() except ValueError: raise Http404 #sdtable = supply_demand_table(from_date, to_date, member) if member.is_customer(): plan_type = "Consumption" products = CustomerProduct.objects.filter(customer=member, planned=True) else: plan_type = "Production" products = ProducerProduct.objects.filter(producer=member, planned=True) columns = plan_columns(from_date, to_date) return render_to_response('distribution/dojo_member_plans.html', { 'from_date': from_date, 'to_date': to_date, 'columns': columns, 'column_count': len(columns), 'member': member, 'from_datestring': from_datestring, 'to_datestring': to_datestring, 'plan_type': plan_type, 'tabnav': "distribution/tabnav.html", }, context_instance=RequestContext(request)) @login_required def json_member_plans(request, from_date, to_date, member_id): #import pdb; pdb.set_trace() try: member = Party.objects.get(pk=member_id) except Party.DoesNotExist: raise Http404 try: from_date = datetime.datetime(*time.strptime(from_date, '%Y_%m_%d')[0:5]).date() to_date = datetime.datetime(*time.strptime(to_date, '%Y_%m_%d')[0:5]).date() except ValueError: raise Http404 if member.is_customer(): plan_type = "Consumption" products = CustomerProduct.objects.filter(customer=member, planned=True) else: plan_type = "Production" #products = ProducerProduct.objects.filter(producer=member, planned=True) products 
= [plan.product for plan in ProductPlan.objects.filter(member=member)] products = list(set(products)) rows = plans_for_dojo(member, products, from_date, to_date) count = len(rows) try: range = request.META["HTTP_RANGE"] range = range.split("=")[1] range = range.split("-") range_start = int(range[0]) range_end = int(range[1]) except KeyError: range_start = 0 range_end = count if count < range_end: range_end = count rows = rows[range_start:range_end + 1] data = simplejson.dumps(rows) response = HttpResponse(data, mimetype="text/json-comment-filtered") response['Cache-Control'] = 'no-cache' response['Content-Range'] = "".join(["items ", str(range_start), "-", str(range_end), "/", str(count + 1)]) return response @login_required def supply_and_demand_week(request, tabs, week_date): try: week_date = datetime.datetime(*time.strptime(week_date, '%Y_%m_%d')[0:5]).date() except ValueError: raise Http404 sdtable = supply_demand_weekly_table(week_date) tabnav = "distribution/tabnav.html" if tabs == "P": tabnav = "producer/producer_tabnav.html" return render_to_response('distribution/supply_demand_week.html', { 'week_date': week_date, 'sdtable': sdtable, 'tabnav': tabnav, }, context_instance=RequestContext(request)) @login_required def dojo_supply_and_demand_week(request, tabs, week_date): try: week_date = datetime.datetime(*time.strptime(week_date, '%Y_%m_%d')[0:5]).date() except ValueError: raise Http404 sdtable = dojo_supply_demand_weekly_table(week_date) columns = sdtable.columns tabnav = "distribution/tabnav.html" if tabs == "P": tabnav = "producer/producer_tabnav.html" return render_to_response('distribution/dojo_supply_demand_week.html', { 'week_date': week_date, 'columns': columns, 'column_count': len(columns), 'tabnav': tabnav, }, context_instance=RequestContext(request)) @login_required def json_supply_and_demand_week(request, week_date): try: week_date = datetime.datetime(*time.strptime(week_date, '%Y_%m_%d')[0:5]).date() except ValueError: raise Http404 tbl = dojo_supply_demand_weekly_table(week_date) rows = tbl.rows #import pdb; pdb.set_trace() count = len(rows) try: range = request.META["HTTP_RANGE"] range = range.split("=")[1] range = range.split("-") range_start = int(range[0]) range_end = int(range[1]) except KeyError: range_start = 0 range_end = count if count < range_end: range_end = count rows = rows[range_start:range_end + 1] data = simplejson.dumps(rows) response = HttpResponse(data, mimetype="text/json-comment-filtered") response['Cache-Control'] = 'no-cache' response['Content-Range'] = "".join(["items ", str(range_start), "-", str(range_end), "/", str(count + 1)]) return response @login_required def produceravail(request, prod_id, year, month, day): availdate = datetime.date(int(year), int(month), int(day)) availdate = availdate - datetime.timedelta(days=datetime.date.weekday(availdate)) + datetime.timedelta(days=2) weekstart = availdate - datetime.timedelta(days=datetime.date.weekday(availdate)) try: producer = Party.objects.get(pk=prod_id) inventory = InventoryItem.objects.filter( Q(producer=producer) & (Q(onhand__gt=0) | Q(inventory_date__range=(weekstart, availdate)))) except Party.DoesNotExist: raise Http404 return render_to_response('distribution/producer_avail.html', {'producer': producer, 'avail_date': weekstart, 'inventory': inventory }, context_instance=RequestContext(request)) @login_required def meatavail(request, prod_id, year, month, day): availdate = datetime.date(int(year), int(month), int(day)) availdate = availdate - 
datetime.timedelta(days=datetime.date.weekday(availdate)) + datetime.timedelta(days=5) weekstart = availdate - datetime.timedelta(days=datetime.date.weekday(availdate)) try: producer = Party.objects.get(pk=prod_id) inventory = InventoryItem.objects.filter( Q(producer=producer) & (Q(onhand__gt=0) | Q(inventory_date__range=(weekstart, availdate)))) except Party.DoesNotExist: raise Http404 return render_to_response('distribution/meat_avail.html', {'producer': producer, 'avail_date': weekstart, 'inventory': inventory }, context_instance=RequestContext(request)) @login_required def all_avail(request): return list_detail.object_list( request, queryset = AvailItem.objects.select_related().order_by( 'distribution_availproducer.avail_date', 'distribution_product.short_name', 'distribution_producer.short_name'), template_name = "distribution/avail_list.html", ) #def welcome(request): # return render_to_response('welcome.html') #changed help to flatpage #def help(request): # return render_to_response('distribution/help.html') class ProductActivity(): def __init__(self, category, product, avail, ordered, delivered, lots): self.category = category self.product = product self.avail = avail self.ordered = ordered self.delivered = delivered self.lots = lots @login_required def dashboard(request): try: fn = food_network() food_network_name = fn.long_name except FoodNetwork.DoesNotExist: fn = None food_network_name = "" thisdate = "" date_form = "" shorts_label = "" plans = [] shorts = [] orders = [] items = [] order_changes = [] if fn: thisdate = next_delivery_date() monday = thisdate - datetime.timedelta(days=datetime.date.weekday(thisdate)) saturday = monday + datetime.timedelta(days=5) date_form = DeliveryDateForm(initial={"next_delivery_date": thisdate}) items = fn.all_avail_items() for item in items: item.category = item.product.parent_string() item.product_name = item.product.short_name items = sorted(items, key=attrgetter('category', 'product_name')) plans = [] shorts = shorts_for_week() shorts_label = "Shorts vs Inventory" if use_plans_for_ordering(): plans = weekly_production_plans(thisdate) shorts_label = "Shorts vs Plans" orders = Order.objects.filter( delivery_date=thisdate).exclude(state="Unsubmitted") order_changes = list(OrderItemChange.objects.filter( order__delivery_date=thisdate, reason=1, )) order_changes.extend(list(OrderItemChange.objects.filter( reason=4, when_changed__range=(monday, saturday) ))) #import pdb; pdb.set_trace() return render_to_response('distribution/dashboard.html', {'plans': plans, 'items': items, 'shorts': shorts, 'shorts_label': shorts_label, 'orders': orders, 'delivery_date': thisdate, 'date_form': date_form, 'food_net': fn, 'food_network_name': food_network_name, 'order_changes': order_changes, }, context_instance=RequestContext(request)) @login_required def reset_date(request): if request.method == "POST": try: fn = food_network() food_network_name = fn.long_name form = DeliveryDateForm(request.POST) if form.is_valid(): next_delivery_date = form.cleaned_data['next_delivery_date'] fn.next_delivery_date = next_delivery_date fn.save() #todo: nips shd be rethought with delivery skeds #import pdb; pdb.set_trace() nips = ProducerProduct.objects.filter( inventoried=False, default_avail_qty__gt=0, product__sellable=True) for nip in nips: item, created = InventoryItem.objects.get_or_create( product=nip.product, producer=nip.producer, inventory_date=next_delivery_date, planned=nip.default_avail_qty) if created: item.remaining = nip.default_avail_qty item.save() except 
FoodNetwork.DoesNotExist: pass return HttpResponseRedirect("/distribution/dashboard/") @login_required def all_orders(request): return list_detail.object_list( request, queryset = OrderItem.objects.all(), template_name = "distribution/order_list.html", ) @login_required def all_deliveries(request): return list_detail.object_list( request, queryset = InventoryTransaction.objects.filter(transaction_type='Delivery'), template_name = "distribution/delivery_list.html", ) @login_required def orders_with_deliveries(request, year, month, day): thisdate = datetime.date(int(year), int(month), int(day)) orderitem_list = OrderItem.objects.select_related().filter(order__delivery_date=thisdate).order_by('order', 'distribution_product.short_name') return render_to_response('distribution/order_delivery_list.html', {'delivery_date': thisdate, 'orderitem_list': orderitem_list}, context_instance=RequestContext(request)) @login_required def payment_selection(request): thisdate = next_delivery_date() init = { 'from_date': thisdate - datetime.timedelta(days=7), 'to_date': thisdate + datetime.timedelta(days=5), } ihform = PaymentSelectionForm(data=request.POST or None, initial=init) msform = PaymentUpdateSelectionForm(data=request.POST or None) csform = CustomerPaymentSelectionForm(data=request.POST or None) if request.method == "POST": if request.POST.get('submit-payment-table'): if ihform.is_valid(): ihdata = ihform.cleaned_data producer_id = ihdata['producer'] from_date = ihdata['from_date'].strftime('%Y_%m_%d') to_date = ihdata['to_date'].strftime('%Y_%m_%d') due = 1 if ihdata['due'] else 0 paid_member = ihdata['paid_member'] return HttpResponseRedirect('/%s/%s/%s/%s/%s/%s/' % ('distribution/producerpayments', producer_id, from_date, to_date, due, paid_member))
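# The json_* views above all repeat the same Dojo-style range handling: parse "items=<start>-<end>" from the
# HTTP_RANGE header, clamp it to the row count, slice the rows, and echo a Content-Range header. Below is a
# minimal sketch of that logic factored into helpers; parse_dojo_range and sliced_json_response are
# hypothetical names, the mimetype keyword mirrors the old-Django HttpResponse usage in the views above, and
# json.dumps stands in for the simplejson.dumps call those views use.
import json

from django.http import HttpResponse


def parse_dojo_range(request, count):
    """Return (range_start, range_end) from a header like 'items=0-24', defaulting to all rows."""
    try:
        header = request.META["HTTP_RANGE"]           # e.g. "items=0-24"
        start_end = header.split("=")[1].split("-")   # -> ["0", "24"]
        range_start, range_end = int(start_end[0]), int(start_end[1])
    except KeyError:
        range_start, range_end = 0, count
    return range_start, min(range_end, count)


def sliced_json_response(request, rows):
    """Slice rows according to the Range header and build the response the Dojo grid expects."""
    count = len(rows)
    range_start, range_end = parse_dojo_range(request, count)
    data = json.dumps(rows[range_start:range_end + 1])
    response = HttpResponse(data, mimetype="text/json-comment-filtered")
    response['Cache-Control'] = 'no-cache'
    response['Content-Range'] = "items %s-%s/%s" % (range_start, range_end, count + 1)
    return response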
# coding: utf-8 """***************************************************************************** * Copyright (C) 2021 Microchip Technology Inc. and its subsidiaries. * * Subject to your compliance with these terms, you may use Microchip software * and any derivatives exclusively with Microchip products. It is your * responsibility to comply with third party license terms applicable to your * use of third party software (including open source software) that may * accompany Microchip software. * * THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER * EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED * WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A * PARTICULAR PURPOSE. * * IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE, * INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND * WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS * BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE * FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN * ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY, * THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE. *****************************************************************************""" ################################################################################ #### Call-backs #### ################################################################################ #Update Symbol Visibility def setClassB_SymbolVisibility(MySymbol, event): MySymbol.setVisible(event["value"]) ################################################################################ #### Component #### ################################################################################ def instantiateComponent(classBComponent): configName = Variables.get("__CONFIGURATION_NAME") classBMenu = classBComponent.createMenuSymbol("CLASSB_MENU", None) classBMenu.setLabel("Class B Startup Test Configuration") execfile(Module.getPath() +"/config/interface.py") print("Test Module: Harmony Class B Library") #Device params #classBFlashNode = ATDF.getNode("/avr-tools-device-file/devices/device/address-spaces/address-space/memory-segment@[name=\"FLASH\"]") classBFlashNode = ATDF.getNode("/avr-tools-device-file/devices/device/address-spaces/address-space/memory-segment@[name=\"code\"]") if classBFlashNode != None: #Flash size classB_FLASH_SIZE = classBComponent.createIntegerSymbol("CLASSB_FLASH_SIZE", None) classB_FLASH_SIZE.setVisible(False) classB_FLASH_SIZE.setDefaultValue(int(classBFlashNode.getAttribute("size"), 16)) #classB_FLASH_SIZE.setDefaultValue(0) print("Test Module: Harmony Class B Library") classBSRAMNode = ATDF.getNode("/avr-tools-device-file/devices/device/address-spaces/address-space/memory-segment@[name=\"kseg0_data_mem\"]") #classBSRAMNode = ATDF.getNode("/avr-tools-device-file/devices/device/address-spaces/address-space/memory-segment@[name=\"HSRAM\"]") print("Test Module: Harmony Class B Library") if classBSRAMNode != None: #SRAM size classB_SRAM_SIZE = classBComponent.createIntegerSymbol("CLASSB_SRAM_SIZE", None) classB_SRAM_SIZE.setVisible(False) classB_SRAM_SIZE.setDefaultValue(int(classBSRAMNode.getAttribute("size"), 16)) #classB_SRAM_SIZE.setDefaultValue(0) #SRAM address classB_SRAM_ADDR = classBComponent.createHexSymbol("CLASSB_SRAM_START_ADDRESS", None) classB_SRAM_ADDR.setVisible(False) classB_SRAM_ADDR.setDefaultValue(int(classBSRAMNode.getAttribute("start"), 16) + 
(0xA0000000)) #classB_SRAM_SIZE.setDefaultValue(0) #SRAM address MSB 24 bits classB_SRAM_START_MSB = classBComponent.createHexSymbol("CLASSB_SRAM_START_MSB", None) classB_SRAM_START_MSB.setVisible(False) #classB_SRAM_START_MSB.setDefaultValue(int(classBSRAMNode.getAttribute("start"), 16) >> 8) classB_SRAM_START_MSB.setDefaultValue((int(classBSRAMNode.getAttribute("start"), 16) >> 8) + (0xA0000000 >> 8)) #classB_SRAM_SIZE.setDefaultValue(0) print("Test Module: Harmony Class B Library") # Insert CPU test classB_UseCPUTest = classBComponent.createBooleanSymbol("CLASSB_CPU_TEST_OPT", classBMenu) classB_UseCPUTest.setLabel("Test CPU Registers?") classB_UseCPUTest.setVisible(True) classB_UseCPUTest.setDefaultValue(False) # Insert SRAM test classB_UseSRAMTest = classBComponent.createBooleanSymbol("CLASSB_SRAM_TEST_OPT", classBMenu) classB_UseSRAMTest.setLabel("Test SRAM?") classB_UseSRAMTest.setVisible(True) classB_UseSRAMTest.setDefaultValue(False) # Select March algorithm for SRAM test classb_Ram_marchAlgo = classBComponent.createKeyValueSetSymbol("CLASSB_SRAM_MARCH_ALGORITHM", classB_UseSRAMTest) classb_Ram_marchAlgo.setLabel("Select RAM March algorithm") classb_Ram_marchAlgo.addKey("CLASSB_SRAM_MARCH_C", "0", "March C") classb_Ram_marchAlgo.addKey("CLASSB_SRAM_MARCH_C_MINUS", "1", "March C minus") classb_Ram_marchAlgo.addKey("CLASSB_SRAM_MARCH_B", "2", "March B") classb_Ram_marchAlgo.setOutputMode("Key") classb_Ram_marchAlgo.setDisplayMode("Description") classb_Ram_marchAlgo.setDescription("Selects the SRAM March algorithm to be used during startup") classb_Ram_marchAlgo.setDefaultValue(0) classb_Ram_marchAlgo.setVisible(False) #This should be enabled based on the above configuration classb_Ram_marchAlgo.setDependencies(setClassB_SymbolVisibility, ["CLASSB_SRAM_TEST_OPT"]) # Size of the area to be tested classb_Ram_marchSize = classBComponent.createIntegerSymbol("CLASSB_SRAM_MARCH_SIZE", classB_UseSRAMTest) classb_Ram_marchSize.setLabel("Size of the tested area (bytes)") classb_Ram_marchSize.setDefaultValue(classB_SRAM_SIZE.getValue() / 4) classb_Ram_marchSize.setVisible(False) classb_Ram_marchSize.setMin(0) # 1024 bytes are reserved for the use of Class B library classb_Ram_marchSize.setMax(classB_SRAM_SIZE.getValue() - 1024) classb_Ram_marchSize.setDescription("Size of the SRAM area to be tested starting from 0x20000400") classb_Ram_marchSize.setDependencies(setClassB_SymbolVisibility, ["CLASSB_SRAM_TEST_OPT"]) print("Test Module: Harmony Class B Library") # CRC-32 checksum availability classB_FlashCRC_Option = classBComponent.createBooleanSymbol("CLASSB_FLASH_CRC_CONF", classBMenu) classB_FlashCRC_Option.setLabel("Test Internal Flash?") classB_FlashCRC_Option.setVisible(True) classB_FlashCRC_Option.setDefaultValue(False) classB_FlashCRC_Option.setDescription("Enable this option if the CRC-32 checksum of the application image is stored at a specific address in the Flash") # Address at which CRC-32 of the application image is stored classB_CRC_address = classBComponent.createHexSymbol("CLASSB_FLASHCRC_ADDR", classB_FlashCRC_Option) classB_CRC_address.setLabel("Flash CRC location") classB_CRC_address.setDefaultValue(0xFE000) classB_CRC_address.setMin(0) classB_CRC_address.setMax(classB_FLASH_SIZE.getValue() - 4) classB_CRC_address.setVisible(False) #This should be enabled based on the above configuration classB_CRC_address.setDependencies(setClassB_SymbolVisibility, ["CLASSB_FLASH_CRC_CONF"]) # Insert Clock test classB_UseClockTest = 
classBComponent.createBooleanSymbol("CLASSB_CLOCK_TEST_OPT", classBMenu) classB_UseClockTest.setLabel("Test CPU Clock?") classB_UseClockTest.setVisible(True) # Acceptable CPU clock frequency error at startup classb_ClockTestPercentage = classBComponent.createKeyValueSetSymbol("CLASSB_CLOCK_TEST_PERCENT", classB_UseClockTest) classb_ClockTestPercentage.setLabel("Permitted CPU clock error at startup") classb_ClockTestPercentage.addKey("CLASSB_CLOCK_5PERCENT", "5", "+-5 %") classb_ClockTestPercentage.addKey("CLASSB_CLOCK_10PERCENT", "10", "+-10 %") classb_ClockTestPercentage.addKey("CLASSB_CLOCK_15PERCENT", "15", "+-15 %") classb_ClockTestPercentage.setOutputMode("Value") classb_ClockTestPercentage.setDisplayMode("Description") classb_ClockTestPercentage.setDescription("Selects the permitted CPU clock error at startup") classb_ClockTestPercentage.setDefaultValue(0) classb_ClockTestPercentage.setVisible(False) classb_ClockTestPercentage.setDependencies(setClassB_SymbolVisibility, ["CLASSB_CLOCK_TEST_OPT"]) # Clock test duration classb_ClockTestDuration = classBComponent.createIntegerSymbol("CLASSB_CLOCK_TEST_DURATION", classB_UseClockTest) classb_ClockTestDuration.setLabel("Clock Test Duration (ms)") classb_ClockTestDuration.setDefaultValue(5) classb_ClockTestDuration.setVisible(False) classb_ClockTestDuration.setMin(5) classb_ClockTestDuration.setMax(20) classb_ClockTestDuration.setDependencies(setClassB_SymbolVisibility, ["CLASSB_CLOCK_TEST_OPT"]) # Insert Interrupt test classB_UseInterTest = classBComponent.createBooleanSymbol("CLASSB_INTERRUPT_TEST_OPT", classBMenu) classB_UseInterTest.setLabel("Test Interrupts?") classB_UseInterTest.setVisible(True) classB_UseInterTest.setDefaultValue(False) classB_UseInterTest.setDescription("This self-test check interrupts operation with the help of NVIC, RTC and TC0") #symbol_debug = classBComponent.createCommentSymbol("CLASSB_DEBUG_MENU", None) #symbol_debug.setLabel("DEBUG") #symbol_debug.setVisible(True) #symbol_APPDEBUG_enabling = classBComponent.createBooleanSymbol("CLASSB_DEBUG_ENABLE", symbol_debug) #symbol_APPDEBUG_enabling.setLabel("Enable Debug parameters") #symbol_APPDEBUG_enabling.setVisible(True) #symbol_APPDEBUG_enabling.setDefaultValue(False) #symbol_APPDEBUG_enabling.setDescription("Enable debug parameters") classBReadOnlyParams = classBComponent.createMenuSymbol("CLASSB_ADDR_MENU", None) classBReadOnlyParams.setLabel("Build parameters (read-only) used by the library") # Read-only symbol for start of non-reserved SRAM classb_AppRam_start = classBComponent.createHexSymbol("CLASSB_SRAM_APP_START", classBReadOnlyParams) classb_AppRam_start.setLabel("Start address of non-reserved SRAM") classb_AppRam_start.setDefaultValue(0xA0000400) classb_AppRam_start.setReadOnly(True) classb_AppRam_start.setMin(0xA0000400) classb_AppRam_start.setMax(0xA0000400) classb_AppRam_start.setDescription("Initial 1kB of SRAM is used by the Class B library") #SRAM last word address classB_SRAM_lastWordAddr = classBComponent.createHexSymbol("CLASSB_SRAM_LASTWORD_ADDR", classBReadOnlyParams) classB_SRAM_lastWordAddr.setLabel("Address of the last word in SRAM") classB_SRAM_lastWordAddr.setReadOnly(True) classB_SRAM_lastWordAddr.setDefaultValue((0xA0000000 + classB_SRAM_SIZE.getValue() - 4)) classB_SRAM_lastWordAddr.setMin((0xA0000000 + classB_SRAM_SIZE.getValue() - 4)) classB_SRAM_lastWordAddr.setMax((0xA0000000 + classB_SRAM_SIZE.getValue() - 4)) sram_top = hex(classB_SRAM_lastWordAddr.getValue() + 4) classB_SRAM_lastWordAddr.setDescription("The SRAM memory address 
range is 0x00000000 to " + str(sram_top)) # Read-only symbol for CRC-32 polynomial classb_FlashCRCPoly = classBComponent.createHexSymbol("CLASSB_FLASH_CRC32_POLY", classBReadOnlyParams) classb_FlashCRCPoly.setLabel("CRC-32 polynomial for Flash test") classb_FlashCRCPoly.setDefaultValue(0xEDB88320) classb_FlashCRCPoly.setReadOnly(True) classb_FlashCRCPoly.setMin(0xEDB88320) classb_FlashCRCPoly.setMax(0xEDB88320) classb_FlashCRCPoly.setDescription("The CRC-32 polynomial used for Flash self-test is " + str(hex(classb_FlashCRCPoly.getValue()))) # Read-only symbol for max SysTick count classb_SysTickMaxCount = classBComponent.createHexSymbol("CLASSB_SYSTICK_MAXCOUNT", classBReadOnlyParams) classb_SysTickMaxCount.setLabel("Maximum SysTick count") classb_SysTickMaxCount.setDefaultValue(0xFFFFFF) classb_SysTickMaxCount.setReadOnly(True) classb_SysTickMaxCount.setMin(0xFFFFFF) classb_SysTickMaxCount.setMax(0xFFFFFF) classb_SysTickMaxCount.setDescription("The SysTick is a 24-bit counter with max count value " + str(hex(classb_SysTickMaxCount.getValue()))) # Read-only symbol for max CPU clock frequency classb_CPU_MaxClock = classBComponent.createIntegerSymbol("CLASSB_CPU_MAX_CLOCK", classBReadOnlyParams) classb_CPU_MaxClock.setLabel("Maximum CPU clock frequency") classb_CPU_MaxClock.setDefaultValue(200000000) classb_CPU_MaxClock.setReadOnly(True) classb_CPU_MaxClock.setMin(200000000) classb_CPU_MaxClock.setMax(200000000) classb_CPU_MaxClock.setDescription("The self-test for CPU clock frequency assumes that the maximum CPU clock frequency is " + str(classb_CPU_MaxClock.getValue()) + "Hz") # Read-only symbol for expected RTC clock frequency classb_RTC_Clock = classBComponent.createIntegerSymbol("CLASSB_TMR1_EXPECTED_CLOCK", classBReadOnlyParams) classb_RTC_Clock.setLabel("Expected RTC clock frequency") classb_RTC_Clock.setDefaultValue(32768) classb_RTC_Clock.setReadOnly(True) classb_RTC_Clock.setMin(32768) classb_RTC_Clock.setMax(32768) classb_RTC_Clock.setDescription("The self-test for CPU clock frequency expects the RTC clock frequency to be " + str(classb_RTC_Clock.getValue()) + "Hz") # Read-only symbol for maximum configurable accuracy for CPU clock self-test classb_MaxAccuracy = classBComponent.createIntegerSymbol("CLASSB_CPU_CLOCK_TEST_ACCUR", classBReadOnlyParams) classb_MaxAccuracy.setLabel("Maximum accuracy for CPU clock test") classb_MaxAccuracy.setDefaultValue(5) classb_MaxAccuracy.setReadOnly(True) classb_MaxAccuracy.setMin(5) classb_MaxAccuracy.setMax(5) classb_MaxAccuracy.setDescription("Error percentage selected for CPU clock frequency test must be " + str(classb_MaxAccuracy.getValue()) + "% or higher") ############################################################################ #### Code Generation #### ############################################################################ # Main Header File classBHeaderFile = classBComponent.createFileSymbol("CLASSB_HEADER", None) classBHeaderFile.setSourcePath("/templates/pic32mzw1_wfi32e01/classb.h.ftl") classBHeaderFile.setOutputName("classb.h") classBHeaderFile.setDestPath("/classb") classBHeaderFile.setProjectPath("config/" + configName + "/classb") classBHeaderFile.setType("HEADER") classBHeaderFile.setMarkup(True) # Main Source File classBSourceFile = classBComponent.createFileSymbol("CLASSB_SOURCE", None) classBSourceFile.setSourcePath("/templates/pic32mzw1_wfi32e01/classb.c.ftl") classBSourceFile.setOutputName("classb.c") classBSourceFile.setDestPath("/classb") classBSourceFile.setProjectPath("config/" + configName + "/classb") 
classBSourceFile.setType("SOURCE") classBSourceFile.setMarkup(True) # Header File common for all tests classBCommHeaderFile = classBComponent.createFileSymbol("CLASSB_COMMON_HEADER", None) classBCommHeaderFile.setSourcePath("/templates/pic32mzw1_wfi32e01/classb_common.h.ftl") classBCommHeaderFile.setOutputName("classb_common.h") classBCommHeaderFile.setDestPath("/classb") classBCommHeaderFile.setProjectPath("config/" + configName +"/classb") classBCommHeaderFile.setType("HEADER") classBCommHeaderFile.setMarkup(True) # Source File for result handling classBSourceResultMgmt = classBComponent.createFileSymbol("CLASSB_SOURCE_RESULT_MGMT_S", None) classBSourceResultMgmt.setSourcePath("/templates/pic32mzw1_wfi32e01/classb_result_management.S.ftl") classBSourceResultMgmt.setOutputName("classb_result_management.S") classBSourceResultMgmt.setDestPath("/classb") classBSourceResultMgmt.setProjectPath("config/" + configName + "/classb") classBSourceResultMgmt.setType("SOURCE") classBSourceResultMgmt.setMarkup(True) # Source File for CPU test classBSourceCpuTestAsm = classBComponent.createFileSymbol("CLASSB_SOURCE_CPUTEST_S", None) classBSourceCpuTestAsm.setSourcePath("/templates/pic32mzw1_wfi32e01/classb_cpu_reg_test_asm.S.ftl") classBSourceCpuTestAsm.setOutputName("classb_cpu_reg_test_asm.S") classBSourceCpuTestAsm.setDestPath("/classb") classBSourceCpuTestAsm.setProjectPath("config/" + configName + "/classb") classBSourceCpuTestAsm.setType("SOURCE") classBSourceCpuTestAsm.setMarkup(True) # Source File for CPU test classBSourceCpuTestAsm = classBComponent.createFileSymbol("CLASSB_SOURCE_CPUTEST", None) classBSourceCpuTestAsm.setSourcePath("/templates/pic32mzw1_wfi32e01/classb_cpu_reg_test.c.ftl") classBSourceCpuTestAsm.setOutputName("classb_cpu_reg_test.c") classBSourceCpuTestAsm.setDestPath("/classb") classBSourceCpuTestAsm.setProjectPath("config/" + configName + "/classb") classBSourceCpuTestAsm.setType("SOURCE") classBSourceCpuTestAsm.setMarkup(True) # Header File for CPU test classBHeaderCpuTestAsm = classBComponent.createFileSymbol("CLASSB_HEADER_CPU_TEST", None) classBHeaderCpuTestAsm.setSourcePath("/templates/pic32mzw1_wfi32e01/classb_cpu_reg_test.h.ftl") classBHeaderCpuTestAsm.setOutputName("classb_cpu_reg_test.h") classBHeaderCpuTestAsm.setDestPath("/classb") classBHeaderCpuTestAsm.setProjectPath("config/" + configName +"/classb") classBHeaderCpuTestAsm.setType("HEADER") classBHeaderCpuTestAsm.setMarkup(True) # Source File for CPU PC test classBSourceCpuPCTest = classBComponent.createFileSymbol("CLASSB_SOURCE_CPUPC_TEST", None) classBSourceCpuPCTest.setSourcePath("/templates/pic32mzw1_wfi32e01/classb_cpu_pc_test.c.ftl") classBSourceCpuPCTest.setOutputName("classb_cpu_pc_test.c") classBSourceCpuPCTest.setDestPath("/classb") classBSourceCpuPCTest.setProjectPath("config/" + configName + "/classb") classBSourceCpuPCTest.setType("SOURCE") classBSourceCpuPCTest.setMarkup(True) # Source File for SRAM test classBSourceSRAMTest = classBComponent.createFileSymbol("CLASSB_SOURCE_SRAM_TEST", None) classBSourceSRAMTest.setSourcePath("/templates/pic32mzw1_wfi32e01/classb_sram_test.c.ftl") classBSourceSRAMTest.setOutputName("classb_sram_test.c") classBSourceSRAMTest.setDestPath("/classb") classBSourceSRAMTest.setProjectPath("config/" + configName + "/classb") classBSourceSRAMTest.setType("SOURCE") classBSourceSRAMTest.setMarkup(True) # Header File for SRAM test classBHeaderSRAMTest = classBComponent.createFileSymbol("CLASSB_HEADER_SRAM_TEST", None) 
classBHeaderSRAMTest.setSourcePath("/templates/pic32mzw1_wfi32e01/classb_sram_test.h.ftl") classBHeaderSRAMTest.setOutputName("classb_sram_test.h") classBHeaderSRAMTest.setDestPath("/classb") classBHeaderSRAMTest.setProjectPath("config/" + configName +"/classb") classBHeaderSRAMTest.setType("HEADER") classBHeaderSRAMTest.setMarkup(True) # Source File for Flash test classBSourceFLASHTest = classBComponent.createFileSymbol("CLASSB_SOURCE_FLASH_TEST", None) classBSourceFLASHTest.setSourcePath("/templates/pic32mzw1_wfi32e01/classb_flash_test.c.ftl") classBSourceFLASHTest.setOutputName("classb_flash_test.c") classBSourceFLASHTest.setDestPath("/classb") classBSourceFLASHTest.setProjectPath("config/" + configName + "/classb") classBSourceFLASHTest.setType("SOURCE") classBSourceFLASHTest.setMarkup(True) # Header File for Flash test classBHeaderFLASHTest = classBComponent.createFileSymbol("CLASSB_HEADER_FLASH_TEST", None) classBHeaderFLASHTest.setSourcePath("/templates/pic32mzw1_wfi32e01/classb_flash_test.h.ftl") classBHeaderFLASHTest.setOutputName("classb_flash_test.h") classBHeaderFLASHTest.setDestPath("/classb") classBHeaderFLASHTest.setProjectPath("config/" + configName +"/classb") classBHeaderFLASHTest.setType("HEADER") classBHeaderFLASHTest.setMarkup(True) # Source File for Clock test classBSourceClockTest = classBComponent.createFileSymbol("CLASSB_SOURCE_CLOCK_TEST", None) classBSourceClockTest.setSourcePath("/templates/pic32mzw1_wfi32e01/classb_clock_test.c.ftl") classBSourceClockTest.setOutputName("classb_clock_test.c") classBSourceClockTest.setDestPath("/classb") classBSourceClockTest.setProjectPath("config/" + configName + "/classb") classBSourceClockTest.setType("SOURCE") classBSourceClockTest.setMarkup(True) # Header File for Clock test classBHeaderClockTest = classBComponent.createFileSymbol("CLASSB_HEADER_CLOCK_TEST", None) classBHeaderClockTest.setSourcePath("/templates/pic32mzw1_wfi32e01/classb_clock_test.h.ftl") classBHeaderClockTest.setOutputName("classb_clock_test.h") classBHeaderClockTest.setDestPath("/classb") classBHeaderClockTest.setProjectPath("config/" + configName +"/classb") classBHeaderClockTest.setType("HEADER") classBSourceClockTest.setMarkup(True) # Source File for Interrupt test classBSourceInterruptTest = classBComponent.createFileSymbol("CLASSB_SOURCE_INTERRUPT_TEST", None) classBSourceInterruptTest.setSourcePath("/templates/pic32mzw1_wfi32e01/classb_interrupt_test.c.ftl") classBSourceInterruptTest.setOutputName("classb_interrupt_test.c") classBSourceInterruptTest.setDestPath("/classb") classBSourceInterruptTest.setProjectPath("config/" + configName + "/classb") classBSourceInterruptTest.setType("SOURCE") classBSourceInterruptTest.setMarkup(True) # Header File for Interrupt test classBHeaderInterruptTest = classBComponent.createFileSymbol("CLASSB_HEADER_INTERRUPT_TEST", None) classBHeaderInterruptTest.setSourcePath("/templates/pic32mzw1_wfi32e01/classb_interrupt_test.h.ftl") classBHeaderInterruptTest.setOutputName("classb_interrupt_test.h") classBHeaderInterruptTest.setDestPath("/classb") classBHeaderInterruptTest.setProjectPath("config/" + configName +"/classb") classBHeaderInterruptTest.setType("HEADER") classBHeaderInterruptTest.setMarkup(True) # Source File for IO pin test classBSourceIOpinTest = classBComponent.createFileSymbol("CLASSB_SOURCE_IOPIN_TEST", None) classBSourceIOpinTest.setSourcePath("/templates/pic32mzw1_wfi32e01/classb_io_pin_test.c.ftl") classBSourceIOpinTest.setOutputName("classb_io_pin_test.c") classBSourceIOpinTest.setDestPath("/classb") 
classBSourceIOpinTest.setProjectPath("config/" + configName + "/classb") classBSourceIOpinTest.setType("SOURCE") classBSourceIOpinTest.setMarkup(True) # Header File for IO pin test classBHeaderIOpinTest = classBComponent.createFileSymbol("CLASSB_HEADER_IOPIN_TEST", None) classBHeaderIOpinTest.setSourcePath("/templates/pic32mzw1_wfi32e01/classb_io_pin_test.h.ftl") classBHeaderIOpinTest.setOutputName("classb_io_pin_test.h") classBHeaderIOpinTest.setDestPath("/classb") classBHeaderIOpinTest.setProjectPath("config/"
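# The file registrations above repeat the same seven-call pattern for every generated Class B source and
# header. A small helper along the following lines (hypothetical, not part of the original module) could
# express the same registrations more compactly inside instantiateComponent(), using only the symbol APIs
# already used above (createFileSymbol, setSourcePath, setOutputName, setDestPath, setProjectPath, setType,
# setMarkup).
def addClassBTemplateFile(classBComponent, configName, symbolId, outputName, fileType):
    """Register one generated Class B file from the pic32mzw1_wfi32e01 template directory."""
    fileSymbol = classBComponent.createFileSymbol(symbolId, None)
    fileSymbol.setSourcePath("/templates/pic32mzw1_wfi32e01/" + outputName + ".ftl")
    fileSymbol.setOutputName(outputName)
    fileSymbol.setDestPath("/classb")
    fileSymbol.setProjectPath("config/" + configName + "/classb")
    fileSymbol.setType(fileType)  # "SOURCE" or "HEADER"
    fileSymbol.setMarkup(True)
    return fileSymbol

# e.g. addClassBTemplateFile(classBComponent, configName, "CLASSB_SOURCE_IOPIN_TEST", "classb_io_pin_test.c", "SOURCE")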
    # narrowed CpG set to evaluate, try to reduce running time
    targetedSet = ontCallsKeySet

    ## ontCalls_narrow_set
    ## - set: need to intersect
    ## - None: no need to intersect
    if ontCalls_narrow_set is not None:
        targetedSet = targetedSet.intersection(ontCalls_narrow_set)
        if len(targetedSet) == 0:  # no cpg sites for evaluation
            return ret_none_tuple

    if ontCalls_narrow_second_set is not None:
        targetedSet = targetedSet.intersection(ontCalls_narrow_second_set)
        if len(targetedSet) == 0:  # no cpg sites for evaluation
            return ret_none_tuple

    for cpgKey in targetedSet:  # key = (chr, start, strand)
        ##### for each site, we perform per-read stats:
        if satisfy_fully_meth_or_unmeth(bgTruth[cpgKey][0]):
            referenceCpGs += 1

            if is_fully_meth(bgTruth[cpgKey][0]):
                mCsites_BGTruth += 1
            elif is_fully_unmeth(bgTruth[cpgKey][0]):
                Csites_BGTruth += 1
            else:
                raise Exception(f'We must see all certain sites here, but see meth_freq={bgTruth[cpgKey][0]}')

            for perCall in ontCalls[cpgKey]:  # perCall is a tuple of (pred_class, pred_score)
                if perCall[0] == 1:
                    mCalls += 1
                elif perCall[0] == 0:
                    cCalls += 1
                else:
                    raise Exception(f'Pred_class is only 0 or 1, but is {perCall}')

                ### variables needed to compute precision, recall etc.:
                if perCall[0] == 1 and is_fully_meth(bgTruth[cpgKey][0]):  # true positive
                    TP_5mC += 1
                elif perCall[0] == 1 and is_fully_unmeth(bgTruth[cpgKey][0]):  # false positive
                    FP_5mC += 1
                elif perCall[0] == 0 and is_fully_meth(bgTruth[cpgKey][0]):  # false negative
                    FN_5mC += 1
                elif perCall[0] == 0 and is_fully_unmeth(bgTruth[cpgKey][0]):  # true negative
                    TN_5mC += 1

                if perCall[0] == 0 and is_fully_unmeth(bgTruth[cpgKey][0]):  # true positive
                    TP_5C += 1
                elif perCall[0] == 0 and is_fully_meth(bgTruth[cpgKey][0]):  # false positive
                    FP_5C += 1
                elif perCall[0] == 1 and is_fully_unmeth(bgTruth[cpgKey][0]):  # false negative
                    FN_5C += 1
                elif perCall[0] == 1 and is_fully_meth(bgTruth[cpgKey][0]):  # true negative
                    TN_5C += 1

                ### prediction results, AUC related:
                ypred_of_ont_tool.append(perCall[0])
                if np.isnan(perCall[1]):
                    yscore_of_ont_tool.append(0.0)
                else:
                    yscore_of_ont_tool.append(perCall[1])

                if is_fully_meth(bgTruth[cpgKey][0]):  # BG Truth label
                    y_of_bgtruth.append(1)
                else:
                    y_of_bgtruth.append(0)
        else:
            raise Exception(f'We must see all certain sites here, but see meth_freq={bgTruth[cpgKey][0]}')

    ### compute all per-read stats:
    with warnings.catch_warnings(record=True) as w:
        try:
            accuracy = (TP_5mC + TN_5mC) / float(TP_5mC + FP_5mC + FN_5mC + TN_5mC)
        except ZeroDivisionError:
            accuracy = 0

        try:
            predicted_condition_positive_5mC = float(TP_5mC + FP_5mC)
            precision_5mC = TP_5mC / predicted_condition_positive_5mC
        except ZeroDivisionError:
            precision_5mC = 0

        try:
            predicted_condition_positive_5C = float(TP_5C + FP_5C)
            precision_5C = TP_5C / predicted_condition_positive_5C
        except ZeroDivisionError:
            precision_5C = 0

        try:
            recall_5mC = TP_5mC / float(TP_5mC + FN_5mC)
        except ZeroDivisionError:
            recall_5mC = 0

        try:
            recall_5C = TP_5C / float(TP_5C + FN_5C)
        except ZeroDivisionError:
            recall_5C = 0

        # F1 score, precision and recall
        f1_micro = f1_score(y_of_bgtruth, ypred_of_ont_tool, average='micro')
        f1_macro = f1_score(y_of_bgtruth, ypred_of_ont_tool, average='macro')
        precision_micro = precision_score(y_of_bgtruth, ypred_of_ont_tool, average='micro')
        precision_macro = precision_score(y_of_bgtruth, ypred_of_ont_tool, average='macro')
        recall_micro = recall_score(y_of_bgtruth, ypred_of_ont_tool, average='micro')
        recall_macro = recall_score(y_of_bgtruth, ypred_of_ont_tool, average='macro')

        try:
            F1_5mC = 2 * ((precision_5mC * recall_5mC) / (precision_5mC + recall_5mC))
        except ZeroDivisionError:
            F1_5mC = 0

        try:
            F1_5C = 2 * ((precision_5C * recall_5C) / (precision_5C + recall_5C))
        except ZeroDivisionError:
            F1_5C = 0

        fprSwitch = 1
        try:
            fpr, tpr, _ = roc_curve(y_of_bgtruth, yscore_of_ont_tool)
            average_precision = average_precision_score(y_of_bgtruth, yscore_of_ont_tool)
        except ValueError:
            logger.error(
                f"###\tERROR for roc_curve: y(Truth):{len(y_of_bgtruth)}, scores(Call pred):{len(yscore_of_ont_tool)}, \nother settings: {title}, {tagname}, {secondFilterBedFileName}")
            fprSwitch = 0
            roc_auc = 0.0
            average_precision = 0.0

        if fprSwitch == 1:
            roc_auc = auc(fpr, tpr)

    ########################
    if save_curve_data:  # save y, y-pred and y-score for a later plot
        curve_data = {'yTrue': y_of_bgtruth, 'yPred': ypred_of_ont_tool, 'yScore': yscore_of_ont_tool}
        os.makedirs(os.path.join(outdir, 'curve_data'), exist_ok=True)
        outfn = os.path.join(outdir, 'curve_data', f'{prefix_name}.{tagname.replace(" ", "_")}.curve_data.pkl')
        with open(outfn, 'wb') as handle:
            pickle.dump(curve_data, handle)

    return (accuracy, roc_auc, average_precision, f1_macro, f1_micro,
            precision_macro, precision_micro, recall_macro, recall_micro, precision_5C,
            recall_5C, F1_5C, cCalls, precision_5mC, recall_5mC,
            F1_5mC, mCalls, referenceCpGs, Csites_BGTruth, mCsites_BGTruth, tagname,)


def save_keys_to_single_site_bed(keys, outfn, callBaseFormat=1, outBaseFormat=1, nonstr='.'):
    """
    Save all keys in set of ('chr 123 123 . . +\n', etc.) to outfn.
    We use a non-string like '.' in the 3rd and 4th columns, per BED file format.
    :param keys:
    :param outfn:
    :return:
    """
    if outfn.endswith('.gz'):
        outfile = gzip.open(outfn, 'wt')
    else:
        outfile = open(outfn, 'w')
    for key in keys:
        if outBaseFormat == 0:
            outfile.write(
                f'{key[0]}\t{key[1] - callBaseFormat + outBaseFormat}\t{key[1] - callBaseFormat + outBaseFormat + 1}\t{nonstr}\t{nonstr}\t{key[2]}\n')
        else:
            outfile.write(
                f'{key[0]}\t{key[1] - callBaseFormat + outBaseFormat}\t{key[1] - callBaseFormat + outBaseFormat}\t{nonstr}\t{nonstr}\t{key[2]}\n')
    outfile.close()


def do_singleton_nonsingleton_scanner():
    """
    Generate the singleton and non-singleton BED files.
    :return:
    """
    kbp = 5
    singletonFilename = os.path.join(pic_base_dir, f'hg38_singletons_{kbp}bp.bed.gz')
    nonsingletonFilename = os.path.join(pic_base_dir, f'hg38_nonsingletons_{kbp}bp.bed.gz')
    SingletonsAndNonSingletonsScanner(reference_genome_hg38_fn, singletonFilename, nonsingletonFilename, kbp=kbp)

    kbp = 10
    singletonFilename = os.path.join(pic_base_dir, f'hg38_singletons_{kbp}bp.bed.gz')
    nonsingletonFilename = os.path.join(pic_base_dir, f'hg38_nonsingletons_{kbp}bp.bed.gz')
    SingletonsAndNonSingletonsScanner(reference_genome_hg38_fn, singletonFilename, nonsingletonFilename, kbp=kbp)


def SingletonsAndNonSingletonsScanner(referenceGenomeFile, outfileName_s, outfileName_ns, kbp=10):
    """
    Generate singleton and non-singleton BED files, based on the reference genome and k-bp up and down streams.
    The output file is 1-based at the start coordinate.
    kbp is the up- and down-stream k-bp region, evaluated on the positive strand.
    Singletons:     only one CpG in the region
    Nonsingletons:  more than one CpG in the region
    """
    reference = SeqIO.to_dict(SeqIO.parse(referenceGenomeFile, "fasta"))
    logger.debug(
        f"###\tSingletonsAndNonSingletonsScanner: {referenceGenomeFile} reference genome file is parsed, up and down bp={kbp}")

    outfile_s = gzip.open(outfileName_s, "wt")  # "s" stands for Singletons
    outfile_ns = gzip.open(outfileName_ns, "wt")  # "ns" stands for Non-Singletons

    for chromosome in list(reference.keys()):
        if chromosome not in HUMAN_CHR_SET:
            continue
        idxs = re.finditer('CG', str(reference[chromosome].seq).upper())

        singleton = -1  # 1 will stand for yes, 0 for no
        for idx in idxs:
            # print(chromosome, idx, idx.start(), idx.end())
            if singleton == -1:
                s = idx.start() + 1  # here 8: mock1 <_sre.SRE_Match object; span=(8, 10), match='CG'> 8 10
                end_index = idx.end()  # here 10: mock1 <_sre.SRE_Match object; span=(8, 10), match='CG'> 8 10
                singleton = 1
            else:
                if (idx.start() - end_index) < kbp:
                    # we just found a non-singleton. I.e. according to the Nanopolish approach,
                    # CGs closer than the k-bp window are considered as non-singletons.
                    # Singletons are SR=XXXXXCGXXXXX
                    # Non-singletons are SR=XXXXXCGXXXXCGXXXCGXXCGCGXXXXX , <kbp for a pair of neighbouring CGs
                    end_index = idx.end()
                    singleton = 0
                else:
                    # The current CG is not part of a non-singleton. It might mean that it is not part of a big
                    # non-singleton or singleton upstream from it; we test which of these options below.
                    # The current CG is more than k-bp from the previous CG, so we store the previous region
                    # into Singletons or Non-singletons.
                    if singleton == 1:
                        # print(chromosome, s, end_index, "SINGLETON")
                        outfile_s.write("{}\t{}\t{}\n".format(chromosome, s, end_index))
                    else:
                        # print(chromosome, s, end_index, "NON-SINGLETON")
                        outfile_ns.write("{}\t{}\t{}\n".format(chromosome, s, end_index))
                    s = idx.start() + 1
                    end_index = idx.end()
                    singleton = 1

        if singleton == 1:
            # this code repetition takes care of the last instance in the long list of CG indexes
            # print(chromosome, s, end_index, "SINGLETON")
            outfile_s.write("{}\t{}\t{}\n".format(chromosome, s, end_index))
        else:
            # print(chromosome, s, end_index, "NON-SINGLETON")
            outfile_ns.write("{}\t{}\t{}\n".format(chromosome, s, end_index))

        logger.debug(f"###\tNonSingletonsScanner: chromosome {chromosome} processed")

    outfile_s.close()
    outfile_ns.close()
    logger.debug(
        f"###\tSingletonsAndNonSingletonsScanner: {referenceGenomeFile} file processed, kbp={kbp}, save to Singletons:{outfile_s}, and Nonsingletons:{outfile_ns}")


def eval_concordant_within_kbp_region(cpgList, evalcpg, kbp=10):
    """
    Evaluate if the given CpG is concordant, by checking the methylation state of sites in the k-bp region.
    For example, within 5bp: CGXXXXCG (4 Xs) is considered inside the 5bp region.
    :param cpgList:
    :param evalcpg:
    :param kbp:
    :return: True if concordant, False if discordant
    """
    # cpgList=list(cpgList)
    for cpg in cpgList:  # cpg is (start, strand, meth_indicator)
        if abs(evalcpg[0] - cpg[0]) - 2 >= kbp:
            # not within the kbp region, skip (Note: kbp is the number of Xs between CGs, e.g. CGXXCGXXCG)
            continue
        if evalcpg[1] != cpg[1]:
            # found a CpG in a different state, return Discordant
            return False
    return True  # Concordant


def nonSingletonsPostprocessing(absoluteBGTruth, nsRegionsBedFileName, nsConcordantFileName, nsDisCordantFileName,
                                kbp=10, print_first=False,
                                genome_annotation_dir=os.path.join(data_base_dir, 'genome-annotation')):
    """
    Define concordant and discordant based on BG-Truth.
    Return 1-based Concordant and Discordant regions in BED files.

    Based on only 100% or 0% bg-truth in BS-seq (absoluteBGTruth), we define:
        Concordant: all CpGs in the 10-bp up and down region are in the same state, such as 0000 or 1111.
        Discordant: CpGs in the 10-bp region are mixed with fully-methylated (1) and unmethylated (0) states.
    This kind of preprocessing has to be done for each studied library separately.

    Output format for the Concordant and Discordant files:
        chr   start   end   meth_state   coverage
        chr1  123     124   1            16
    """
    logger.debug(f"nonSingletonsPostprocessing, based on file={nsRegionsBedFileName}, kbp={kbp}")
    bedBGTruth = BedTool(calldict2txt(absoluteBGTruth), from_string=True).sort()

    infn = os.path.join(genome_annotation_dir, nsRegionsBedFileName)
    regionNonsingletons = BedTool(infn).sort()

    regionWithBGTruth = regionNonsingletons.intersect(bedBGTruth, wa=True, wb=True)  # chr start end chr start end . . strand

    regionDict = defaultdict(list)  # key->value, key=region of (chr, start, end), value=list of [f1,f2,etc.], suche
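# Usage illustration (not from the original source) for eval_concordant_within_kbp_region() above. Each entry
# is treated here as (position, methylation state), matching how indices 0 and 1 are compared in the function:
# a CpG is concordant when every neighbouring CpG within the k-bp window shares its state, discordant otherwise.
if __name__ == '__main__':
    region_cpgs = [(100, 1), (104, 1), (109, 1)]  # three fully-methylated CpGs close together
    print(eval_concordant_within_kbp_region(region_cpgs, (104, 1), kbp=10))  # True  -> concordant
    mixed_cpgs = [(100, 1), (104, 0), (109, 1)]   # mixed methylated/unmethylated states in the window
    print(eval_concordant_within_kbp_region(mixed_cpgs, (104, 1), kbp=10))   # False -> discordant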
import rospy
import math

from dynamic_stack_decider.abstract_decision_element import AbstractDecisionElement
import humanoid_league_msgs.msg
from bitbots_hcm.hcm_dsd.hcm_blackboard import STATE_ANIMATION_RUNNING, STATE_CONTROLLABLE, STATE_FALLEN, STATE_FALLING, \
    STATE_HARDWARE_PROBLEM, STATE_MOTOR_OFF, STATE_PENALTY, STATE_PICKED_UP, STATE_RECORD, STATE_SHUT_DOWN, \
    STATE_STARTUP, STATE_WALKING, STATE_HCM_OFF, STATE_KICKING
from humanoid_league_speaker.speaker import speak
from humanoid_league_msgs.msg import Audio


class StartHCM(AbstractDecisionElement):
    """
    Initializes HCM.
    """

    def perform(self, reevaluate=False):
        if self.blackboard.shut_down_request:
            if self.blackboard.current_state == STATE_HARDWARE_PROBLEM:
                self.blackboard.current_state = STATE_SHUT_DOWN
                return "SHUTDOWN_WHILE_HARDWARE_PROBLEM"
            else:
                self.blackboard.current_state = STATE_SHUT_DOWN
                return "SHUTDOWN_REQUESTED"
        else:
            if not reevaluate:
                if not self.is_walkready():
                    return "NOT_WALKREADY"
                self.blackboard.current_state = STATE_STARTUP
            return "RUNNING"

    def is_walkready(self):
        """
        We check if any joint has an offset from the walkready pose which is higher than a threshold
        """
        if self.blackboard.current_joint_state is None:
            return False
        i = 0
        for joint_name in self.blackboard.current_joint_state.name:
            if joint_name == "HeadPan" or joint_name == "HeadTilt":
                # we don't care about the head position
                i += 1
                continue
            if abs(math.degrees(self.blackboard.current_joint_state.position[i]) -
                   self.blackboard.walkready_pose_dict[joint_name]) > self.blackboard.walkready_pose_threshold:
                return False
            i += 1
        return True

    def get_reevaluate(self):
        return True


class Stop(AbstractDecisionElement):
    """
    Handles manual stops
    """

    def perform(self, reevaluate=False):
        if self.blackboard.stopped:
            # we do an action sequence to go into stop and to stay there
            return "STOPPED"
        else:
            return "FREE"

    def get_reevaluate(self):
        return True


class Record(AbstractDecisionElement):
    """
    Decides if the robot is currently recording animations
    """

    def perform(self, reevaluate=False):
        # check if the robot is currently recording animations
        if self.blackboard.record_active:
            self.blackboard.current_state = STATE_RECORD
            return "RECORD_ACTIVE"
        else:
            # robot is not recording
            return "FREE"

    def get_reevaluate(self):
        return True


class CheckMotors(AbstractDecisionElement):
    """
    Checks if we are getting information from the motors.
    Since the HCM is not able to work without a motor connection, we will stop if there are no values.
    Needs to be checked before other sensors, since they also need the power to be able to respond.
    """

    def __init__(self, blackboard, dsd, parameters=None):
        super(CheckMotors, self).__init__(blackboard, dsd, parameters)
        self.last_different_msg_time = rospy.Time.from_sec(0)
        self.had_problem = False

    def perform(self, reevaluate=False):
        self.clear_debug_data()
        if self.blackboard.visualization_active:
            # we will have no problems with hardware in visualization
            return "OKAY"

        # we check if the values are actually changing, since the joint_state controller will publish the same
        # message even if there is no connection anymore. But we don't want to go directly to hardware error if
        # we just have a small break, since this can happen often due to loose cabling
        if self.blackboard.previous_joint_state is not None and self.blackboard.current_joint_state is not None \
                and (self.blackboard.previous_joint_state.effort != self.blackboard.current_joint_state.effort
                     or self.blackboard.previous_joint_state.position != self.blackboard.current_joint_state.position) \
                and not self.blackboard.servo_diag_error:
            self.last_different_msg_time = self.blackboard.current_time

        if self.blackboard.simulation_active:
            # Some simulators will give exact same joint messages which look like errors, so ignore this case
            if self.blackboard.last_motor_update_time != rospy.Time.from_sec(0):
                return "OKAY"
            else:
                return "MOTORS_NOT_STARTED"

        # check if we want to turn the motors off after not using them for a longer time
        if self.blackboard.last_motor_goal_time is not None \
                and self.blackboard.current_time.to_sec() - self.blackboard.last_motor_goal_time.to_sec() \
                > self.blackboard.motor_off_time:
            rospy.logwarn_throttle(5, "Didn't receive goals for " + str(
                self.blackboard.motor_off_time) + " seconds. Will shut down the motors and wait for commands.")
            self.publish_debug_data("Time since last motor goals",
                                    self.blackboard.current_time.to_sec() - self.blackboard.last_motor_goal_time.to_sec())
            self.blackboard.current_state = STATE_MOTOR_OFF
            # we do an action sequence to turn off the motors and stay in motor off
            return "TURN_OFF"

        # see if we get no messages or always the exact same
        if self.blackboard.current_time.to_sec() - self.last_different_msg_time.to_sec() > 0.1:
            if self.blackboard.is_power_on:
                if self.blackboard.current_state == STATE_STARTUP and self.blackboard.current_time.to_sec() - \
                        self.blackboard.start_time.to_sec() < 10:
                    # we are still in the startup phase, just wait and don't complain
                    return "MOTORS_NOT_STARTED"
                else:
                    # tell that we have a hardware problem
                    self.had_problem = True
                    # wait for motors to connect
                    self.blackboard.current_state = STATE_HARDWARE_PROBLEM
                    return "PROBLEM"
            else:
                # we have to turn the motors on
                return "TURN_ON"

        if self.had_problem:
            # had a problem before, just tell that this is solved now
            rospy.loginfo("Motors are now connected. Will resume.")
            self.had_problem = False

        # motors are on and we can continue
        return "OKAY"

    def get_reevaluate(self):
        return True


class CheckIMU(AbstractDecisionElement):
    """
    Checks if we are getting information from the IMU.
    Since the HCM can not detect falls without it, we will shut everything down if we don't have an IMU.
    """

    def __init__(self, blackboard, dsd, parameters=None):
        super(CheckIMU, self).__init__(blackboard, dsd, parameters)
        self.last_msg = None
        self.last_different_msg_time = rospy.Time.from_sec(0)
        self.had_problem = False

    def perform(self, reevaluate=False):
        if self.blackboard.visualization_active:
            # In visualization, we do not have an IMU. Therefore, return OKAY to ignore that.
            return "OKAY"

        # we will always get the same message if there is no connection, so check if it differs
        if self.last_msg is not None and self.blackboard.imu_msg is not None \
                and not self.last_msg.orientation == self.blackboard.imu_msg.orientation \
                and not self.blackboard.imu_diag_error:
            self.last_different_msg_time = self.blackboard.current_time
        self.last_msg = self.blackboard.imu_msg

        if self.blackboard.simulation_active:
            # Some simulators will give exact same IMU messages which look like errors, so ignore this case
            if self.last_msg:
                return "OKAY"
            else:
                return "IMU_NOT_STARTED"

        if self.blackboard.current_time.to_sec() - self.last_different_msg_time.to_sec() > 0.1:
            if self.blackboard.current_state == STATE_STARTUP and self.blackboard.current_time.to_sec() - \
                    self.blackboard.start_time.to_sec() < 10:
                # wait for the IMU to start
                return "IMU_NOT_STARTED"
            else:
                self.blackboard.current_state = STATE_HARDWARE_PROBLEM
                self.had_problem = True
                return "PROBLEM"

        if self.had_problem:
            # had a problem before, just tell that this is solved now
            rospy.loginfo("IMU is now connected. Will resume.")
            self.had_problem = False

        return "OKAY"

    def get_reevaluate(self):
        return True


class CheckPressureSensor(AbstractDecisionElement):
    """
    Checks connection to pressure sensors.
    """

    def __init__(self, blackboard, dsd, parameters=None):
        super(CheckPressureSensor, self).__init__(blackboard, dsd, parameters)
        self.last_pressure_values = None
        self.last_different_msg_time = rospy.Time.from_sec(0)
        self.had_problem = False

    def perform(self, reevaluate=False):
        if self.blackboard.visualization_active:
            # no pressure sensors in visualization, but that's okay
            return "OKAY"
        if not self.blackboard.pressure_sensors_installed:
            # no pressure sensors installed, no check necessary
            return "OKAY"
        if not self.blackboard.pressure_diag_error:
            self.last_different_msg_time = self.blackboard.current_time

        if self.blackboard.current_time.to_sec() - self.last_different_msg_time.to_sec() > 0.1:
            if self.blackboard.current_state == STATE_STARTUP and self.blackboard.current_time.to_sec() - \
                    self.blackboard.start_time.to_sec() < 10:
                # wait for the pressure sensors to start
                self.blackboard.current_state = STATE_STARTUP
                return "PRESSURE_NOT_STARTED"
            else:
                self.blackboard.current_state = STATE_HARDWARE_PROBLEM
                return "PROBLEM"

        if self.had_problem:
            # had a problem before, just tell that this is solved now
            rospy.loginfo("Pressure sensors are now connected. Will resume.")
            self.had_problem = False

        return "OKAY"

    def get_reevaluate(self):
        return True


class PickedUp(AbstractDecisionElement):
    """
    Decides if the robot is currently picked up
    """

    def perform(self, reevaluate=False):
        if self.blackboard.visualization_active:
            return "ON_GROUND"
        # check if the robot is currently being picked up.
foot have no connection to the ground, # but robot is more or less upright (to differentiate from falling) if self.blackboard.pressure_sensors_installed and not self.blackboard.simulation_active and \ sum(self.blackboard.pressures) < 10 and \ abs(self.blackboard.smooth_accel[0]) < self.blackboard.pickup_accel_threshold and \ abs(self.blackboard.smooth_accel[1]) < self.blackboard.pickup_accel_threshold: self.blackboard.current_state = STATE_PICKED_UP if not reevaluate: speak("Picked up!", self.blackboard.speak_publisher, priority=50) # we do an action sequence to go to walkready and stay in picked up state return "PICKED_UP" # robot is not picked up return "ON_GROUND" def get_reevaluate(self): return True class Falling(AbstractDecisionElement): """ Decides if the robot is currently falling and has to act on this """ def perform(self, reevaluate=False): # check if the robot is currently falling falling_direction = self.blackboard.fall_checker.check_falling(self.blackboard.gyro, self.blackboard.quaternion) if self.blackboard.falling_detection_active and falling_direction is not None: self.blackboard.current_state = STATE_FALLING if falling_direction == self.blackboard.fall_checker.FRONT: return "FALLING_FRONT" if falling_direction == self.blackboard.fall_checker.BACK: return "FALLING_BACK" if falling_direction == self.blackboard.fall_checker.LEFT: return "FALLING_LEFT" if falling_direction == self.blackboard.fall_checker.RIGHT: return "FALLING_RIGHT" # robot is not fallen return "NOT_FALLING" def get_reevaluate(self): return True class FallingClassifier(AbstractDecisionElement): def perform(self, reevaluate=False): prediction = self.blackboard.classifier.smooth_classify(self.blackboard.imu_msg, self.blackboard.current_joint_state, self.blackboard.cop_l_msg, self.blackboard.cop_r_msg) if prediction == 0: return "NOT_FALLING" elif prediction == 1: return "FALLING_FRONT" elif prediction == 2: return "FALLING_BACK" elif prediction == 3: return "FALLING_LEFT" elif prediction == 4: return "FALLING_RIGHT" else: return "NOT_FALLING" def get_reevaluate(self): return True class Sitting(AbstractDecisionElement): """ Decides if the robot is sitting (due to sitting down earlier). """ def perform(self, reevaluate=False): if self.blackboard.current_joint_state is None: return "NO" # simple check is looking at knee joint positions # todo can be done more sophisticated if self.blackboard.current_joint_state is None: return "NO" left_knee = 0 right_knee = 0 i = 0 for joint_name in self.blackboard.current_joint_state.name: if joint_name == "LKnee": left_knee = self.blackboard.current_joint_state.position[i] elif joint_name == "RKnee": right_knee = self.blackboard.current_joint_state.position[i] i += 1 if abs(left_knee) > 2.5 and abs(right_knee) > 2.5: return "YES" else: return "NO" def get_reevaluate(self): # we never have to reevaluate since this state of this can only be changed by decisions above it return False class Fallen(AbstractDecisionElement): """ Decides
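The walkready check described in `StartHCM.is_walkready` (each joint's deviation from a stored pose compared against a threshold, ignoring the head joints) can be shown in isolation. This is a sketch only: the joint names, pose values and threshold below are made-up examples, not the robot's real configuration.

```python
# Hedged sketch of the walkready check: compare each joint's current position (degrees)
# with a stored walkready pose and reject any deviation above a threshold.
import math

walkready_pose_deg = {"LKnee": 60.0, "RKnee": 60.0, "LAnklePitch": -30.0, "RAnklePitch": -30.0}
threshold_deg = 5.0
ignored_joints = {"HeadPan", "HeadTilt"}  # head position does not matter for walkready


def is_walkready(joint_names, joint_positions_rad):
    for name, position in zip(joint_names, joint_positions_rad):
        if name in ignored_joints or name not in walkready_pose_deg:
            continue
        if abs(math.degrees(position) - walkready_pose_deg[name]) > threshold_deg:
            return False
    return True


print(is_walkready(["LKnee", "RKnee"], [math.radians(61.0), math.radians(80.0)]))  # False
```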
= structure.get_sorted_structure() # If grid_density is in the kpoints_settings use Kpoints.automatic_density if self.kpoints_settings.get('grid_density'): return Kpoints.automatic_density( structure, int(self.kpoints_settings['grid_density']), self.force_gamma) # If length is in the kpoints_settings use Kpoints.automatic elif self.kpoints_settings.get('length'): return Kpoints.automatic(self.kpoints_settings['length']) # Raise error. Unsure of which kpoint generation to use else: raise ValueError( "Invalid KPoint Generation algo : Supported Keys are " "grid_density: for Kpoints.automatic_density generation " "and length : for Kpoints.automatic generation") def __str__(self): return self.name def __repr__(self): output = [self.name, ""] section_names = ['INCAR settings', 'KPOINTS settings', 'POTCAR settings'] count = 0 for d in [self.incar_settings, self.kpoints_settings, self.potcar_settings]: output.append(section_names[count]) for k, v in d.items(): output.append("%s = %s" % (k, str(v))) output.append("") count += 1 return "\n".join(output) def as_dict(self): config_dict = { "INCAR": self.incar_settings, "KPOINTS": self.kpoints_settings, "POTCAR": self.potcar_settings } return { "name": self.name, "config_dict": config_dict, "hubbard_off": self.hubbard_off, "constrain_total_magmom": self.set_nupdown, "sort_structure": self.sort_structure, "potcar_functional": self.potcar_functional, "@class": self.__class__.__name__, "@module": self.__class__.__module__, } @classmethod def from_dict(cls, d): return cls(d["name"], d["config_dict"], hubbard_off=d.get("hubbard_off", False), constrain_total_magmom=d["constrain_total_magmom"], sort_structure=d.get("sort_structure", True), potcar_functional=d.get("potcar_functional", None)) @staticmethod def from_file(name, filename, **kwargs): """ Creates a DictVaspInputSet from a yaml/json file. Args: name (str): A name for the input set. filename (str): Path to a yaml/json file containing the settings. \*\*kwargs: Same kwargs as in the constructor. Returns: DictVaspInputSet """ return DictVaspInputSet(name, loadfn(filename), **kwargs) MITVaspInputSet = partial(DictVaspInputSet.from_file, "MIT", os.path.join(MODULE_DIR, "MITVaspInputSet.yaml")) """ Standard implementation of VaspInputSet utilizing parameters in the MIT High-throughput project. The parameters are chosen specifically for a high-throughput project, which means in general pseudopotentials with fewer electrons were chosen. Please refer:: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. A high-throughput infrastructure for density functional theory calculations. Computational Materials Science, 2011, 50(8), 2295-2310. doi:10.1016/j.commatsci.2011.02.023 """ MITGGAVaspInputSet = partial(DictVaspInputSet.from_file, "MIT GGA", os.path.join(MODULE_DIR, "MITVaspInputSet.yaml"), hubbard_off=True) """ GGA (no U) version of MITVaspInputSet. """ MITHSEVaspInputSet = partial( DictVaspInputSet.from_file, "MIT HSE", os.path.join(MODULE_DIR, "MITHSEVaspInputSet.yaml")) """ Typical implementation of input set for a HSE run using MIT parameters. """ class MITNEBVaspInputSet(DictVaspInputSet): """ Class for writing NEB inputs. Note that EDIFF is not on a per atom basis for this input set. Args: nimages (int): Number of NEB images (excluding start and ending structures). \*\*kwargs: Other kwargs supported by :class:`DictVaspInputSet`. 
""" def __init__(self, nimages=8, user_incar_settings=None, **kwargs): #NEB specific defaults defaults = {'IMAGES': nimages, 'IBRION': 1, 'NFREE': 2, 'ISYM': 0, 'LORBIT': 0, 'LCHARG': False} if user_incar_settings: defaults.update(user_incar_settings) DictVaspInputSet.__init__( self, "MIT NEB", loadfn(os.path.join(MODULE_DIR, "MITVaspInputSet.yaml")), user_incar_settings=defaults, ediff_per_atom=False, **kwargs) self.nimages = nimages def _process_structures(self, structures): """ Remove any atom jumps across the cell """ input_structures = structures structures = [input_structures[0]] for s in input_structures[1:]: prev = structures[-1] for i in range(len(s)): t = np.round(prev[i].frac_coords - s[i].frac_coords) if np.sum(t) > 0.5: s.translate_sites([i], t, to_unit_cell=False) structures.append(s) return structures def write_input(self, structures, output_dir, make_dir_if_not_present=True, write_cif=False): """ NEB inputs has a special directory structure where inputs are in 00, 01, 02, .... Args: structures ([Structure]): nimages + 2 structures (including start and end structures). output_dir (str): Directory to output the VASP input files make_dir_if_not_present (bool): Set to True if you want the directory (and the whole path) to be created if it is not present. write_cif (bool): If true, writes a cif along with each POSCAR. """ if len(structures) != self.incar_settings['IMAGES'] + 2: raise ValueError('incorrect number of structures') structures = self._process_structures(structures) if make_dir_if_not_present and not os.path.exists(output_dir): os.makedirs(output_dir) s0 = structures[0] self.get_incar(s0).write_file(os.path.join(output_dir, 'INCAR')) self.get_kpoints(s0).write_file(os.path.join(output_dir, 'KPOINTS')) self.get_potcar(s0).write_file(os.path.join(output_dir, 'POTCAR')) for i, s in enumerate(structures): d = os.path.join(output_dir, str(i).zfill(2)) if make_dir_if_not_present and not os.path.exists(d): os.makedirs(d) self.get_poscar(s).write_file(os.path.join(d, 'POSCAR')) if write_cif: s.to(filename=os.path.join(d, '{}.cif'.format(i))) def as_dict(self): d = super(MITNEBVaspInputSet, self).as_dict() d["nimages"] = self.nimages return d @classmethod def from_dict(cls, d): return cls(user_incar_settings=d.get("user_incar_settings", None), constrain_total_magmom=d["constrain_total_magmom"], sort_structure=d.get("sort_structure", True), hubbard_off=d.get("hubbard_off", False), nimages=d["nimages"]) class MITMDVaspInputSet(DictVaspInputSet): """ Class for writing a vasp md run. This DOES NOT do multiple stage runs. Args: start_temp (int): Starting temperature. end_temp (int): Final temperature. nsteps (int): Number of time steps for simulations. The NSW parameter. time_step (int): The time step for the simulation. The POTIM parameter. Defaults to 2fs. hubbard_off (bool): Whether to turn off Hubbard U. Defaults to *True* (different behavior from standard input sets) for MD runs. spin_polarized (bool): Whether to do spin polarized calculations. The ISPIN parameter. Defaults to False. sort_structure (bool): Whether to sort structure. Defaults to False (different behavior from standard input sets). **kwargs: Other kwargs supported by :class:`DictVaspInputSet`. 
""" def __init__(self, start_temp, end_temp, nsteps, time_step=2, hubbard_off=True, spin_polarized=False, sort_structure=False, user_incar_settings=None, **kwargs): #MD default settings defaults = {'TEBEG': start_temp, 'TEEND': end_temp, 'NSW': nsteps, 'EDIFF': 0.000001, 'LSCALU': False, 'LCHARG': False, 'LPLANE': False, 'LWAVE': True, 'ICHARG': 0, 'ISMEAR': 0, 'SIGMA': 0.05, 'NELMIN': 4, 'LREAL': True, 'BMIX': 1, 'MAXMIX': 20, 'NELM': 500, 'NSIM': 4, 'ISYM': 0, 'ISIF': 0, 'IBRION': 0, 'NBLOCK': 1, 'KBLOCK': 100, 'SMASS': 0, 'POTIM': time_step, 'PREC': 'Normal', 'ISPIN': 2 if spin_polarized else 1} #override default settings with user supplied settings if user_incar_settings: defaults.update(user_incar_settings) DictVaspInputSet.__init__( self, "MIT MD", loadfn(os.path.join(MODULE_DIR, "MITVaspInputSet.yaml")), hubbard_off=hubbard_off, sort_structure=sort_structure, user_incar_settings=defaults, **kwargs) self.start_temp = start_temp self.end_temp = end_temp self.nsteps = nsteps self.time_step = time_step self.spin_polarized = spin_polarized self.user_incar_settings = user_incar_settings or {} #use VASP default ENCUT if 'ENCUT' not in self.user_incar_settings: del self.incar_settings['ENCUT'] if not spin_polarized: del self.incar_settings['MAGMOM'] def get_kpoints(self, structure): return Kpoints.gamma_automatic() def as_dict(self): d = super(MITMDVaspInputSet, self).as_dict() d.update({ "start_temp": self.start_temp, "end_temp": self.end_temp, "nsteps": self.nsteps, "time_step": self.time_step, "spin_polarized": self.spin_polarized, "user_incar_settings": self.user_incar_settings }) return d @classmethod def from_dict(cls, d): return cls(start_temp=d["start_temp"], end_temp=d["end_temp"], nsteps=d["nsteps"], time_step=d["time_step"], hubbard_off=d.get("hubbard_off", False), user_incar_settings=d["user_incar_settings"], spin_polarized=d.get("spin_polarized", False), constrain_total_magmom=d["constrain_total_magmom"], sort_structure=d.get("sort_structure", True)) MPVaspInputSet = partial(DictVaspInputSet.from_file, "MP", os.path.join(MODULE_DIR, "MPVaspInputSet.yaml")) """ Implementation of VaspInputSet utilizing parameters in the public Materials Project. Typically, the pseudopotentials chosen contain more electrons than the MIT parameters, and the k-point grid is ~50% more dense. The LDAUU parameters are also different due to the different psps used, which result in different fitted values. """ MPGGAVaspInputSet = partial(DictVaspInputSet.from_file, "MP GGA", os.path.join(MODULE_DIR, "MPVaspInputSet.yaml"), hubbard_off=True) """ Same as the MPVaspInput set, but the +U is enforced to be turned off. """ MPHSEVaspInputSet = partial(DictVaspInputSet.from_file, "MP HSE", os.path.join(MODULE_DIR, "MPHSEVaspInputSet.yaml")) """ Same as the MPVaspInput set, but with HSE parameters. """ class MPStaticVaspInputSet(DictVaspInputSet): """ Implementation of VaspInputSet overriding MaterialsProjectVaspInputSet for static calculations that typically follow relaxation runs. It is recommended to use the static from_previous_run method to construct the input set to inherit most of the functions. Args: kpoints_density (int): kpoints density for the reciprocal cell of structure. Might need to increase the default value when calculating metallic materials. sym_prec (float): Tolerance for symmetry finding kwargs: hubbard_off (bool): Whether to turn off Hubbard U if it is specified in config_dict ("MP Static"). Defaults to False, i.e., follow settings in config_dict. 
user_incar_settings (dict): User INCAR settings. This allows a user to override INCAR settings, e.g., setting a different MAGMOM for various elements or species. constrain_total_magmom (bool): Whether to constrain the total magmom (NUPDOWN in INCAR) to be the sum of the expected MAGMOM for all species. Defaults to False. sort_structure (bool): Whether to sort the structure (using the default sort order of electronegativity) before generating input files. Defaults to True, the behavior you would want most of the time. This ensures that similar atomic species are grouped together. ediff_per_atom (bool): Whether the EDIFF is specified on a per atom basis. """ def __init__(self, kpoints_density=90, sym_prec=0.1, **kwargs): DictVaspInputSet.__init__( self, "MP Static", loadfn(os.path.join(MODULE_DIR, "MPVaspInputSet.yaml")), **kwargs) self.incar_settings.update( {"IBRION": -1, "ISMEAR": -5, "LAECHG": True, "LCHARG": True, "LORBIT": 11, "LVHAR": True, "LWAVE": False, "NSW": 0, "ICHARG": 0, "EDIFF": 0.000001, "ALGO": "Normal"}) self.kpoints_settings.update({"kpoints_density": kpoints_density}) self.sym_prec = sym_prec def get_kpoints(self, structure, primitive_standard=False): """ Get a KPOINTS file using the fully automated grid method. Uses Gamma centered meshes for hexagonal cells and Monk grids otherwise. Args: structure (Structure/IStructure): structure to get kpoints primitive_standard (Bool): whether the input structure is a primitive standardized cell """ if not primitive_standard: structure = self.get_poscar(structure).structure self.kpoints_settings['grid_density'] = \ self.kpoints_settings["kpoints_density"] * \ structure.lattice.reciprocal_lattice.volume * \ structure.num_sites return super(MPStaticVaspInputSet, self).get_kpoints(structure) def get_poscar(self, structure): """ Get a POSCAR file with a primitive standardized cell of the giving structure. Args: structure (Structure/IStructure): structure to get POSCAR """ sym_finder = SpacegroupAnalyzer(structure, symprec=self.sym_prec) return Poscar(sym_finder.get_primitive_standard_structure(False)) @staticmethod def get_structure(vasp_run, outcar=None, initial_structure=False, additional_info=False, sym_prec=0.1): """ Process structure for static calculations from previous run. Args: vasp_run (Vasprun): Vasprun that contains the final structure from previous run. outcar (Outcar): Outcar that contains the magnetization info from previous run. initial_structure (bool):
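The static input set's `get_kpoints` scales the k-point grid density by the reciprocal-lattice volume and the number of sites before handing it to the automatic-density generator. A minimal sketch of that arithmetic is below; the reciprocal volume and site count are illustrative placeholders, since in the real input set they come from the pymatgen structure.

```python
# Hedged sketch of the grid-density scaling described above:
#   grid_density = kpoints_density * V_reciprocal * num_sites
kpoints_density = 90        # default kpoints_density of the static input set
reciprocal_volume = 0.968   # example reciprocal-lattice volume (placeholder value)
num_sites = 2               # example number of atoms in the cell

grid_density = kpoints_density * reciprocal_volume * num_sites
print(f"grid_density passed to automatic-density k-point generation: {grid_density:.1f}")
```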
''' The MIT License (MIT) Copyright © 2021 Opentensor.ai Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ''' """ DPN nucleus Bittensor endpoint trained on PIL images to detect objects using DPN. """ import argparse import torch import torch.nn as nn import torch.nn.functional as F from types import SimpleNamespace import bittensor from collections.abc import Callable from bittensor.utils.batch_transforms import Normalize class DPNNucleus(torch.nn.Module): """ Bittensor endpoint trained on PIL images to detect objects using an DPN. """ def __init__( self, routing_callback, config: 'bittensor.Config' = None, **kwargs): r""" Init a new DPN nucleus module. Args: config (:obj: `bittensor.Config`, `required`) munch namespace config item. """ super(DPNNucleus, self).__init__() if config == None: config = DPNNucleus.config() DPNNucleus.check_config(config) self.config = config # To be set. self.routing_callback = routing_callback in_planes, out_planes = config.nucleus.in_planes, config.nucleus.out_planes num_blocks, dense_depth = config.nucleus.num_blocks, config.nucleus.dense_depth # Transform Network """ Transform network. Layers take in image inputs normalizes them and applies 4 convolutional layers. Image encoder: transforms PIL-encoded tensors to a common shape. [batch_size, channels, rows, cols] -> [batch_size, -1, -1, -1] Output: [batch_size, self.transform_dim (9728)] """ self.transform = Normalize((0.1307,), (0.3081,), device=self.device) self.adaptive_pool = nn.AdaptiveAvgPool2d((32, 32)) self.transform_conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False) self.transform_bn1 = nn.BatchNorm2d(64) self.last_planes = 64 self.transform_layer1 = self._make_layer(in_planes[0], out_planes[0], num_blocks[0], dense_depth[0], stride=1) self.transform_layer2 = self._make_layer(in_planes[1], out_planes[1], num_blocks[1], dense_depth[1], stride=2) self.transform_layer3 = self._make_layer(in_planes[2], out_planes[2], num_blocks[2], dense_depth[2], stride=1) self.transform_layer4 = self._make_layer(in_planes[3], out_planes[3], num_blocks[3], dense_depth[3], stride=2) self.transform_dim = (out_planes[3] * 4)+(((num_blocks[3]+1) * 4)*dense_depth[3]) # Context layers. """ Distillation model for remote context. This layer takes input coming from transform layer, and runs it through 3 linear layers, projecting it to bittensor.__network_dim__. """ self.context_layer1 = nn.Linear(self.transform_dim, 512) self.context_layer2 = nn.Linear(512, 256) self.context_layer3 = nn.Linear(256, bittensor.__network_dim__) # hidden layer. 
self.hidden_layer1 = nn.Linear(self.transform_dim + bittensor.__network_dim__, 512) self.hidden_layer2 = nn.Linear(512, 256) self.hidden_layer3 = nn.Linear(256, bittensor.__network_dim__) # Layers to project target down to target size passed by config # (number of classes) self.target_layer1 = nn.Linear(bittensor.__network_dim__, 128) self.target_layer2 = nn.Linear(128, self.config.nucleus.target_dim) self.to(self.device) @staticmethod def config() -> 'bittensor.Config': parser = argparse.ArgumentParser(); DPNNucleus.add_args(parser) config = bittensor.config( parser ); return config @staticmethod def add_args(parser: argparse.ArgumentParser): r""" This function adds the configuration items for the DPN nucleus. These args are use to instantiate a Dual Path model. Instantiating a configuration with the defaults will yield a "shallow" DPN-26 configuration. For deeper network configurations, it is possible to set the num_blocks parameter to (3, 4, 20, 3) for a DPN-92. For DPN-98 set the following: in_planes: (160, 320, 640, 1280) out_planes: (256, 512, 1024, 2048) num_blocks: (3, 6, 20, 3) dense_depth: (16, 32, 32, 128) """ def to_list(arg): return [int(i) for i in arg.split(",")] parser.add_argument('--nucleus.in_planes', default='160, 320, 640, 1280', action="append", type=to_list) parser.add_argument('--nucleus.out_planes', default='256, 512, 1024, 2048', action="append", type=to_list) parser.add_argument('--nucleus.num_blocks', default='3, 6, 20, 3', action="append", type=to_list) parser.add_argument('--nucleus.dense_depth', default='16, 32, 32, 128', action="append", type=to_list) parser.add_argument('--nucleus.target_dim', default=10, type=int, help='Final logit layer dimension. i.e. 10 for CIFAR-10.') @staticmethod def check_config(config: 'bittensor.Config'): assert isinstance(config.nucleus.in_planes, list), 'nucleus.in_planes must be a tuple, got {}'.format(config.nucleus.in_planes) assert isinstance(config.nucleus.out_planes, list), 'nucleus.out_planes must be a tuple, got {}'.format(config.nucleus.out_planes) assert isinstance(config.nucleus.num_blocks, list), 'nucleus.num_blocks must be a tuple, got {}'.format(config.nucleus.num_blocks) assert isinstance(config.nucleus.dense_depth, list), 'nucleus.dense_depth must be a tuple, got {}'.format(config.nucleus.dense_depth) assert all(isinstance(el, int) for el in config.nucleus.in_planes), 'nucleus.in_planes must be a tuple of ints, got {}'.format(config.nucleus.in_planes) assert all(isinstance(el, int) for el in config.nucleus.out_planes), 'nucleus.out_planes must be a tuple of ints, got {}'.format(config.nucleus.out_planes) assert all(isinstance(el, int) for el in config.nucleus.num_blocks), 'nucleus.num_blocks must be a tuple of ints, got {}'.format(config.nucleus.num_blocks) assert all(isinstance(el, int) for el in config.nucleus.dense_depth), 'nucleus.dense_depth must be a tuple of ints, got {}'.format(config.nucleus.dense_depth) def attach_routing_callback(self, routing_callback: Callable[ [torch.Tensor, torch.Tensor], torch.Tensor ] ): """ Assigns the routing_callback call to this neuron. Returns: routing_callback (:callabl:`Callable[ [torch.Tensor, torch.Tensor], torch.Tensor `, `required`): Routing function to call on self.route() """ self.routing_callback = routing_callback @property def route( self, inputs: torch.Tensor, query: torch.Tensor ) -> torch.FloatTensor: """ Calls this nucleus's subscribed routing function. self.routing_callback must be set before this call is made. 
Args: inputs (:obj:`torch.LongTensor` of shape :obj:`( batch_size, sequence_len )`, `required`): Batch_size length list of tokenized sentences. query (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, query_dimension)`, `required`): Context tensor used to select which neurons to query for each example. Returns: remote_context (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_len, bittensor.__network_dim__)`, `required`): Context from calling remote network. """ if self.routing_callback == None: raise RuntimeError('The routing function must be set on this nucleus before a remote_forward call can execute.') else: return self.routing_callback( inputs = inputs, query = query ) def forward_image ( self, images: torch.Tensor): r""" Forward image inputs through the DPN nucleus . Args: inputs (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_dim, channels, rows, cols)`, `required`): Image tensors produced by calling PIL.toTensor() and with sequence dimension. Returns: hidden (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_dim, bittensor.__network_dim__)`, `required`): Hidden layer encoding produced by using local_context. """ # images: remove sequence dimension from images. # images.shape = [batch_size, channels, rows, cols] images = images.view(images.shape[0] * images.shape[1], images.shape[2], images.shape[3], images.shape[4]) # hidden: hidden layer using local context for local computation only. # hidden.shape = [batch_size, __network_dim__] hidden = self.forward (images = images.to(self.device), remote = False).local_hidden # hidden: re-add sequence dimension to outputs. # hidden.shape = [batch_size, sequence_dim, __network_dim__] hidden = torch.unsqueeze(hidden, 1) return hidden def local_forward ( self, images: torch.Tensor, targets: torch.Tensor = None ) -> SimpleNamespace: r""" Forward pass non-sequential image inputs and targets through the DPN Nucleus. Args: images (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, channels, rows, cols)`, `required`): PIL.toTensor() encoded images. targets (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.target_size)`, `optional`): Image labels. remote (:obj:`bool')`, `optional`): Switch between local and remote context. If true, function makes quries to the remote network. Returns: SimpleNamespace ( local_context (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, bittensor.__network_dim__)`, `required`): Pre-Hidden layer context, trained to match the remote context. local_hidden (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, bittensor.__network_dim__)`, `required`): Hidden layer produced from the context. local_target (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_dim)`, `optional`): FFNN Target predictions using local_context. local_target_loss (:obj:`torch.FloatTensor` of shape :obj:`(1)`, `optional`): FFNN Classification loss using local_context. local_accuracy (:obj:`torch.FloatTensor` of shape :obj:`(1)`, `optional`): Accuracy of target predictions. transform (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, transform_dim)`, `optional`): transformation of various sized images to batch-size transform dim. ) """ # Return vars to be filled. output = SimpleNamespace () r""" Transform the images into a common shape (32x32) """ # transform: transform images to common shape. 
# transform.shape = [batch_size, self.transform_dim] transform = self.transform(images) transform = self.adaptive_pool(transform) transform = F.relu(self.transform_bn1(self.transform_conv1(transform.detach()))) transform = self.transform_layer1(transform) transform = self.transform_layer2(transform) transform = self.transform_layer3(transform) transform = self.transform_layer4(transform) transform = F.avg_pool2d(transform, 4) output.transform = torch.flatten(transform, start_dim=1) # local_context: distillation model for remote_context. # local_context.shape = [batch_size, bittensor.__network_dim__] local_context = self.context_layer1(output.transform.detach()) local_context = self.context_layer2(local_context) output.local_context = self.context_layer3(local_context) # local_hidden: hidden layer encoding using local_context. # local_hidden.shape = [batch_size, bittensor.__network_dim__] local_hidden = torch.cat([output.transform, output.local_context], dim=1) local_hidden = self.hidden_layer1(local_hidden) local_hidden = self.hidden_layer2(local_hidden) output.local_hidden = self.hidden_layer3(local_hidden) if targets is not None: # local_target: projection of local_hidden onto target dimension. # local_target.shape = [batch_size, target_dim] targets.to(self.device) local_target = self.target_layer1(output.local_hidden) local_target
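The context/hidden/target chain in `local_forward` (a distillation "context" projection, a hidden encoding built from the concatenated transform and context, and a small target head) can be sketched with stand-in dimensions. The sizes below are made up for illustration and do not claim to match `transform_dim`, `bittensor.__network_dim__`, or the configured `target_dim`.

```python
# Hedged sketch of the projection chain used in local_forward, with placeholder sizes.
import torch
import torch.nn as nn

transform_dim, network_dim, target_dim = 9728, 1024, 10

context = nn.Sequential(nn.Linear(transform_dim, 512), nn.Linear(512, 256), nn.Linear(256, network_dim))
hidden = nn.Sequential(nn.Linear(transform_dim + network_dim, 512), nn.Linear(512, 256), nn.Linear(256, network_dim))
target = nn.Sequential(nn.Linear(network_dim, 128), nn.Linear(128, target_dim))

batch = torch.randn(4, transform_dim)            # stand-in for the flattened transform output
local_context = context(batch.detach())          # distillation model for the remote context
local_hidden = hidden(torch.cat([batch, local_context], dim=1))
local_target = target(local_hidden)
print(local_target.shape)                        # torch.Size([4, 10])
```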
* self.batch_c * range_c[2] - range_c[2] / 2 # - range_c[2] / 2, so that the last value is ignored which is actually the begining of next batch if range_c_end > range_c[1]: range_c_end = range_c[1] a = np.sqrt(self.dataset[0].system.xyz.cell[0][0]**2+self.dataset[0].system.xyz.cell[0][1]**2+self.dataset[0].system.xyz.cell[0][2]**2) b = np.sqrt(self.dataset[0].system.xyz.cell[1][0]**2+self.dataset[0].system.xyz.cell[1][1]**2+self.dataset[0].system.xyz.cell[1][2]**2) c = np.sqrt(self.dataset[0].system.xyz.cell[2][0]**2+self.dataset[0].system.xyz.cell[2][1]**2+self.dataset[0].system.xyz.cell[2][2]**2) # gen llhpc script with open("opt-abc-%d-%d-%d.slurm" % (i_batch_a, i_batch_b, i_batch_c), 'w') as fout: fout.write("#!/bin/bash\n") fout.write("#SBATCH -p %s\n" % self.run_params["partition"]) fout.write("#SBATCH -N %d\n" % self.run_params["nodes"]) fout.write("#SBATCH -n %d\n" % self.run_params["ntask"]) fout.write("#SBATCH -J %s\n" % self.run_params["jobname"]) fout.write("#SBATCH -o %s\n" % self.run_params["stdout"]) fout.write("#SBATCH -e %s\n" % self.run_params["stderr"]) fout.write("cat > optimization.in<<EOF\n") #self.dataset[0].to_input(fout) self.dataset[0].system.coordtype = "reduced" fout.write(self.dataset[0].to_string()) fout.write("EOF\n") fout.write("cat > optimization.files<<EOF\n") #self.files.name = "optimization.files" self.files.main_in = "optimization.in" self.files.main_out = "optimization.out" self.files.wavefunc_in = "optimization-i" self.files.wavefunc_out = "optimization-o" self.files.tmp = "tmp" #self.files.to_files(fout, self.dataset[0].system) fout.write(self.files.to_string(system=self.dataset[0].system)) fout.write("EOF\n") fout.write("a_in=%f\n" % a) fout.write("b_in=%f\n" % b) fout.write("c_in=%f\n" % c) fout.write("a1=%f\n" % self.dataset[0].system.xyz.cell[0][0]) fout.write("a2=%f\n" % self.dataset[0].system.xyz.cell[0][1]) fout.write("a3=%f\n" % self.dataset[0].system.xyz.cell[0][2]) fout.write("b1=%f\n" % self.dataset[0].system.xyz.cell[1][0]) fout.write("b2=%f\n" % self.dataset[0].system.xyz.cell[1][1]) fout.write("b3=%f\n" % self.dataset[0].system.xyz.cell[1][2]) fout.write("c1=%f\n" % self.dataset[0].system.xyz.cell[2][0]) fout.write("c2=%f\n" % self.dataset[0].system.xyz.cell[2][1]) fout.write("c3=%f\n" % self.dataset[0].system.xyz.cell[2][2]) fout.write("rprim_line=`cat optimization.in | grep -n \'rprim\' | cut -d \":\" -f 1`\n") fout.write("after_rprim_cell_line=`echo \"${rprim_line} + 4\" | bc`\n") fout.write("for a in `seq -w %f %f %f`\n" % (a+range_a_start, range_a[2], a+range_a_end)) fout.write("do\n") fout.write("for b in `seq -w %f %f %f`\n" % (b+range_b_start, range_b[2], b+range_b_end)) fout.write("do\n") fout.write("for c in `seq -w %f %f %f`\n" % (c+range_c_start, range_c[2], c+range_c_end)) fout.write("do\n") fout.write(" mkdir relax-${a}-${b}-${c}\n") fout.write(" cp optimization.files *.psp8 *.GGA_PBE-JTH.xml relax-${a}-${b}-${c}/\n") fout.write(" cat optimization.in | head -n +${rprim_line} > relax-${a}-${b}-${c}/optimization.in\n") fout.write(" vec11=$(printf \"%-.6f\" `echo \"scale=6; result=${a1} * ${a} / ${a_in}; print result\" | bc`)\n") fout.write(" vec12=$(printf \"%-.6f\" `echo \"scale=6; result=${a2} * ${a} / ${a_in}; print result\" | bc`)\n") fout.write(" vec13=$(printf \"%-.6f\" `echo \"scale=6; result=${a3} * ${a} / ${a_in}; print result\" | bc`)\n") fout.write(" vec21=$(printf \"%-.6f\" `echo \"scale=6; result=${b1} * ${b} / ${b_in}; print result\" | bc`)\n") fout.write(" vec22=$(printf \"%-.6f\" `echo \"scale=6; result=${b2} 
* ${b} / ${b_in}; print result\" | bc`)\n") fout.write(" vec23=$(printf \"%-.6f\" `echo \"scale=6; result=${b3} * ${b} / ${b_in}; print result\" | bc`)\n") fout.write(" vec31=$(printf \"%-.6f\" `echo \"scale=6; result=${c1} * ${c} / ${c_in}; print result\" | bc`)\n") fout.write(" vec32=$(printf \"%-.6f\" `echo \"scale=6; result=${c2} * ${c} / ${c_in}; print result\" | bc`)\n") fout.write(" vec33=$(printf \"%-.6f\" `echo \"scale=6; result=${c3} * ${c} / ${c_in}; print result\" | bc`)\n") fout.write(" cat >> relax-${a}-${b}-${c}/optimization.in<<EOF\n") fout.write("${vec11} ${vec12} ${vec13}\n") fout.write("${vec21} ${vec22} ${vec23}\n") fout.write("${vec31} ${vec32} ${vec33}\n") fout.write("EOF\n") fout.write(" cat optimization.in | tail -n +${after_rprim_cell_line} >> relax-${a}-${b}-${c}/optimization.in\n") fout.write(" cd relax-${a}-${b}-${c}/\n") fout.write(" yhrun $PMF_ABINIT < optimization.files\n") fout.write(" cd ../\n") fout.write("done\n") fout.write("done\n") fout.write("done\n") # gen pbs script with open("opt-abc-%d-%d-%d.pbs" % (i_batch_a, i_batch_b, i_batch_c), 'w') as fout: fout.write("#!/bin/bash\n") fout.write("#PBS -N %s-%d-%d-%d\n" % (self.run_params["jobname"], i_batch_a, i_batch_b, i_batch_c)) fout.write("#PBS -l nodes=%d:ppn=%d\n" % (self.run_params["nodes"], self.run_params["ppn"])) if "queue" in self.run_params and self.run_params["queue"] != None: fout.write("#PBS -q %s\n" %self.run_params["queue"]) fout.write("\n") fout.write("cd $PBS_O_WORKDIR\n") fout.write("NP=`cat $PBS_NODEFILE | wc -l`\n") fout.write("cat > optimization.in<<EOF\n") #self.dataset[0].to_input(fout) self.dataset[0].system.coordtype = "reduced" fout.write(self.dataset[0].to_string()) fout.write("EOF\n") fout.write("cat > optimization.files<<EOF\n") #self.files.name = "optimization.files" self.files.main_in = "optimization.in" self.files.main_out = "optimization.out" self.files.wavefunc_in = "optimization-i" self.files.wavefunc_out = "optimization-o" self.files.tmp = "tmp" #self.files.to_files(fout, self.dataset[0].system) fout.write(self.files.to_string(system=self.dataset[0].system)) fout.write("EOF\n") fout.write("a_in=%f\n" % a) fout.write("b_in=%f\n" % b) fout.write("c_in=%f\n" % c) fout.write("a1=%f\n" % self.dataset[0].system.xyz.cell[0][0]) fout.write("a2=%f\n" % self.dataset[0].system.xyz.cell[0][1]) fout.write("a3=%f\n" % self.dataset[0].system.xyz.cell[0][2]) fout.write("b1=%f\n" % self.dataset[0].system.xyz.cell[1][0]) fout.write("b2=%f\n" % self.dataset[0].system.xyz.cell[1][1]) fout.write("b3=%f\n" % self.dataset[0].system.xyz.cell[1][2]) fout.write("c1=%f\n" % self.dataset[0].system.xyz.cell[2][0]) fout.write("c2=%f\n" % self.dataset[0].system.xyz.cell[2][1]) fout.write("c3=%f\n" % self.dataset[0].system.xyz.cell[2][2]) fout.write("rprim_line=`cat optimization.in | grep -n \'rprim\' | cut -d \":\" -f 1`\n") fout.write("after_rprim_cell_line=`echo \"${rprim_line} + 4\" | bc`\n") fout.write("for a in `seq -w %f %f %f`\n" % (a+range_a_start, range_a[2], a+range_a_end)) fout.write("do\n") fout.write("for b in `seq -w %f %f %f`\n" % (b+range_b_start, range_b[2], b+range_b_end)) fout.write("do\n") fout.write("for c in `seq -w %f %f %f`\n" % (c+range_c_start, range_c[2], c+range_c_end)) fout.write("do\n") fout.write(" mkdir relax-${a}-${b}-${c}\n") fout.write(" cp optimization.files *.psp8 *.GGA_PBE-JTH.xml relax-${a}-${b}-${c}/\n") fout.write(" cat optimization.in | head -n +${rprim_line} > relax-${a}-${b}-${c}/optimization.in\n") fout.write(" vec11=$(printf \"%-.6f\" `echo \"scale=6; 
result=${a1} * ${a} / ${a_in}; print result\" | bc`)\n") fout.write(" vec12=$(printf \"%-.6f\" `echo \"scale=6; result=${a2} * ${a} / ${a_in}; print result\" | bc`)\n") fout.write(" vec13=$(printf \"%-.6f\" `echo \"scale=6; result=${a3} * ${a} / ${a_in}; print result\" | bc`)\n") fout.write(" vec21=$(printf \"%-.6f\" `echo \"scale=6; result=${b1} * ${b} / ${b_in}; print result\" | bc`)\n") fout.write(" vec22=$(printf \"%-.6f\" `echo \"scale=6; result=${b2} * ${b} / ${b_in}; print result\" | bc`)\n") fout.write(" vec23=$(printf \"%-.6f\" `echo \"scale=6; result=${b3} * ${b} / ${b_in}; print result\" | bc`)\n") fout.write(" vec31=$(printf \"%-.6f\" `echo \"scale=6; result=${c1} * ${c} / ${c_in}; print result\" | bc`)\n") fout.write(" vec32=$(printf \"%-.6f\" `echo \"scale=6; result=${c2} * ${c} / ${c_in}; print result\" | bc`)\n") fout.write(" vec33=$(printf \"%-.6f\" `echo \"scale=6; result=${c3} * ${c} / ${c_in}; print result\" | bc`)\n") fout.write(" cat >> relax-${a}-${b}-${c}/optimization.in<<EOF\n") fout.write("${vec11} ${vec12} ${vec13}\n") fout.write("${vec21} ${vec22} ${vec23}\n") fout.write("${vec31} ${vec32} ${vec33}\n") fout.write("EOF\n") fout.write(" cat optimization.in | tail -n +${after_rprim_cell_line} >> relax-${a}-${b}-${c}/optimization.in\n") fout.write(" cd relax-${a}-${b}-${c}/\n") fout.write(" mpirun -np $NP -machinefile $PBS_NODEFILE $PMF_ABINIT < optimization.files\n") fout.write(" cd ../\n") fout.write("done\n") fout.write("done\n") fout.write("done\n") # gen local bash script with open("opt-abc-%d-%d-%d.sh" % (i_batch_a, i_batch_b, i_batch_c), 'w') as fout: fout.write("#!/bin/bash\n") fout.write("cat > optimization.in<<EOF\n") #self.dataset[0].to_input(fout) self.dataset[0].system.coordtype = "reduced" fout.write(self.dataset[0].to_string()) fout.write("EOF\n") fout.write("cat > optimization.files<<EOF\n") #self.files.name = "optimization.files" self.files.main_in = "optimization.in" self.files.main_out = "optimization.out" self.files.wavefunc_in = "optimization-i" self.files.wavefunc_out = "optimization-o" self.files.tmp = "tmp" #self.files.to_files(fout, self.dataset[0].system) fout.write(self.files.to_string(system=self.dataset[0].system)) fout.write("EOF\n") fout.write("a_in=%f\n" % a) fout.write("b_in=%f\n" % b) fout.write("c_in=%f\n" % c) fout.write("a1=%f\n" % self.dataset[0].system.xyz.cell[0][0]) fout.write("a2=%f\n" % self.dataset[0].system.xyz.cell[0][1]) fout.write("a3=%f\n" % self.dataset[0].system.xyz.cell[0][2]) fout.write("b1=%f\n" % self.dataset[0].system.xyz.cell[1][0]) fout.write("b2=%f\n" % self.dataset[0].system.xyz.cell[1][1]) fout.write("b3=%f\n" % self.dataset[0].system.xyz.cell[1][2]) fout.write("c1=%f\n" % self.dataset[0].system.xyz.cell[2][0]) fout.write("c2=%f\n" % self.dataset[0].system.xyz.cell[2][1]) fout.write("c3=%f\n" % self.dataset[0].system.xyz.cell[2][2]) fout.write("rprim_line=`cat optimization.in | grep -n \'rprim\' | cut -d \":\" -f 1`\n") fout.write("after_rprim_cell_line=`echo \"${rprim_line} + 4\" | bc`\n") fout.write("for a in `seq -w %f %f %f`\n" % (a+range_a_start, range_a[2], a+range_a_end)) fout.write("do\n") fout.write("for b in `seq -w %f %f %f`\n" % (b+range_b_start, range_b[2], b+range_b_end)) fout.write("do\n") fout.write("for c in `seq -w %f %f %f`\n" % (c+range_c_start, range_c[2], c+range_c_end)) fout.write("do\n") fout.write(" mkdir relax-${a}-${b}-${c}\n") fout.write(" cp optimization.files *.psp8 *.GGA_PBE-JTH.xml relax-${a}-${b}-${c}/\n") fout.write(" cat optimization.in | head -n +${rprim_line} > 
relax-${a}-${b}-${c}/optimization.in\n") fout.write(" vec11=$(printf \"%-.6f\" `echo \"scale=6; result=${a1} * ${a} / ${a_in}; print result\" | bc`)\n") fout.write(" vec12=$(printf \"%-.6f\" `echo \"scale=6; result=${a2} * ${a} / ${a_in}; print result\" | bc`)\n") fout.write(" vec13=$(printf \"%-.6f\" `echo \"scale=6; result=${a3} * ${a} / ${a_in}; print result\" | bc`)\n") fout.write(" vec21=$(printf \"%-.6f\" `echo \"scale=6; result=${b1} * ${b} / ${b_in}; print result\" | bc`)\n") fout.write(" vec22=$(printf \"%-.6f\" `echo \"scale=6; result=${b2} * ${b} / ${b_in}; print result\" | bc`)\n") fout.write(" vec23=$(printf \"%-.6f\" `echo \"scale=6; result=${b3} * ${b} / ${b_in}; print result\" | bc`)\n") fout.write(" vec31=$(printf \"%-.6f\" `echo \"scale=6; result=${c1} * ${c} / ${c_in}; print result\" | bc`)\n") fout.write(" vec32=$(printf \"%-.6f\" `echo \"scale=6; result=${c2} * ${c} / ${c_in}; print result\" | bc`)\n") fout.write(" vec33=$(printf \"%-.6f\" `echo \"scale=6; result=${c3} * ${c} / ${c_in}; print result\" | bc`)\n") fout.write(" cat >> relax-${a}-${b}-${c}/optimization.in<<EOF\n") fout.write("${vec11} ${vec12} ${vec13}\n") fout.write("${vec21} ${vec22} ${vec23}\n") fout.write("${vec31} ${vec32} ${vec33}\n") fout.write("EOF\n") fout.write(" cat optimization.in | tail -n +${after_rprim_cell_line} >> relax-${a}-${b}-${c}/optimization.in\n") fout.write(" cd relax-${a}-${b}-${c}/\n") fout.write(" %s $PMF_ABINIT < optimization.files\n" % (self.run_params["mpi"])) fout.write(" cd ../\n") fout.write("done\n") fout.write("done\n") fout.write("done\n") # gen lsf_sz script with open("opt-abc-%d-%d-%d.lsf_sz" % (i_batch_a, i_batch_b, i_batch_c), 'w') as fout: fout.write("#!/bin/bash\n") fout.write("APP_NAME=%s\n" % self.run_params["queue"]) fout.write("NP=%d\n" % (self.run_params["nodes"]*self.run_params["ppn"])) fout.write("NP_PER_NODE=%d\n" % self.run_params["ppn"]) fout.write("RUN=\"RAW\"\n") fout.write("CURDIR=$PWD\n") fout.write("#VASP=/home-yg/Soft/Vasp5.4/vasp_std\n") fout.write("source /home-yg/env/intel-12.1.sh\n") fout.write("source /home-yg/env/openmpi-1.6.5-intel.sh\n") fout.write("cd $CURDIR\n") fout.write("# starting creating ./nodelist\n") fout.write("rm -rf $CURDIR/nodelist >& /dev/null\n") fout.write("for i in `echo $LSB_HOSTS`\n") fout.write("do\n") fout.write(" echo \"$i\" >> $CURDIR/nodelist \n") fout.write("done\n") fout.write("ndoelist=$(cat $CURDIR/nodelist | uniq | awk \'{print $1}\' | tr \'\n\' \',\')\n") fout.write("cat > optimization.in<<EOF\n") #self.dataset[0].to_input(fout) self.dataset[0].system.coordtype = "reduced" fout.write(self.dataset[0].to_string()) fout.write("EOF\n") fout.write("cat > optimization.files<<EOF\n") #self.files.name = "optimization.files" self.files.main_in = "optimization.in" self.files.main_out = "optimization.out" self.files.wavefunc_in = "optimization-i" self.files.wavefunc_out = "optimization-o" self.files.tmp = "tmp" #self.files.to_files(fout, self.dataset[0].system) fout.write(self.files.to_string(system=self.dataset[0].system)) fout.write("EOF\n") fout.write("a_in=%f\n" % a) fout.write("b_in=%f\n" % b) fout.write("c_in=%f\n" % c) fout.write("a1=%f\n" % self.dataset[0].system.xyz.cell[0][0]) fout.write("a2=%f\n" % self.dataset[0].system.xyz.cell[0][1]) fout.write("a3=%f\n" % self.dataset[0].system.xyz.cell[0][2]) fout.write("b1=%f\n" % self.dataset[0].system.xyz.cell[1][0]) fout.write("b2=%f\n" % self.dataset[0].system.xyz.cell[1][1]) fout.write("b3=%f\n" % self.dataset[0].system.xyz.cell[1][2]) fout.write("c1=%f\n" % 
self.dataset[0].system.xyz.cell[2][0]) fout.write("c2=%f\n" % self.dataset[0].system.xyz.cell[2][1]) fout.write("c3=%f\n" % self.dataset[0].system.xyz.cell[2][2]) fout.write("rprim_line=`cat optimization.in | grep -n \'rprim\' | cut -d \":\" -f 1`\n") fout.write("after_rprim_cell_line=`echo \"${rprim_line} + 4\" | bc`\n") fout.write("for a in `seq -w %f %f %f`\n" % (a+range_a_start, range_a[2], a+range_a_end)) fout.write("do\n") fout.write("for b in `seq -w %f %f %f`\n" % (b+range_b_start, range_b[2],
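The generated SLURM/PBS/bash/LSF scripts all perform the same cell-vector rescaling with `bc`: each rprim row is multiplied by the ratio of the trial length (a, b, c) to the original length (a_in, b_in, c_in). The sketch below reproduces that arithmetic in Python with an example cubic cell; the numbers are illustrative only.

```python
# Hedged sketch of the rprim scaling done by the generated shell scripts.
import numpy as np

cell = np.array([[4.0, 0.0, 0.0],
                 [0.0, 4.0, 0.0],
                 [0.0, 0.0, 4.0]])            # original cell vectors (example)
a_in, b_in, c_in = np.linalg.norm(cell, axis=1)

a, b, c = 4.1, 4.0, 3.9                       # one (a, b, c) point scanned by the nested loops
scaled = cell * (np.array([a, b, c]) / np.array([a_in, b_in, c_in]))[:, None]
for row in scaled:
    print(" ".join(f"{x:.6f}" for x in row))  # lines appended into optimization.in
```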
Science.""" @staticmethod def get_identifiers(data: dict) -> Dict[str, Any]: """Extract identifier information. :param data: dictionary of web response. :return: Identifier record. """ recognised_types = { "issn", "eissn", "isbn", "eisbn", "art_no", "meeting_abs", "xref_doi", "parent_book_doi", "doi", } field = {rtype: None for rtype in recognised_types} field["uid"] = data["UID"] try: identifiers = data["dynamic_data"]["cluster_related"]["identifiers"] identifier = get_as_list(identifiers, "identifier") for entry in identifier: type_ = entry["@type"] value = entry["@value"] if type_ in recognised_types: field[type_] = value except: pass return field @staticmethod def get_pub_info(data: dict) -> Dict[str, Any]: """Extract publication information fields. :param data: dictionary of web response. :return: Publication info record. """ field = { "sort_date": None, "pub_type": None, "page_count": None, "source": None, "doc_type": None, "publisher": None, "publisher_city": None, } try: summary = data["static_data"]["summary"] if "pub_info" in summary: pub_info = summary["pub_info"] field["sort_date"] = pub_info["@sortdate"] field["pub_type"] = pub_info["@pubtype"] field["page_count"] = int(pub_info["page"]["@page_count"]) if "publishers" in summary and "publisher" in summary["publishers"]: publisher = summary["publishers"]["publisher"] field["publisher"] = publisher["names"]["name"]["full_name"] field["publisher_city"] = publisher["address_spec"]["city"] if "titles" in summary and "title" in summary["titles"]: titles = get_as_list(summary["titles"], "title") for title in titles: if title["@type"] == "source": field["source"] = title["#text"] break if "doctypes" in summary: doctypes = get_as_list(summary["doctypes"], "doctype") field["doc_type"] = doctypes[0] except: pass return field @staticmethod def get_title(data: dict) -> Union[None, str]: """Extract title. May raise exception on error. :param data: dictionary of web response. :return: String of title or None if not found. """ try: for entry in data["static_data"]["summary"]["titles"]["title"]: if "@type" in entry and entry["@type"] == "item" and "#text" in entry: return entry["#text"] except: return None @staticmethod def get_names(data: dict) -> List[Dict[str, Any]]: """Extract names fields. :param data: dictionary of web response. :return: List of name records. """ field = list() try: data["static_data"]["summary"]["names"] attrib = WosNameAttributes(data) names = get_as_list(data["static_data"]["summary"]["names"], "name") for name in names: entry = dict() entry["seq_no"] = int(get_entry_or_none(name, "@seq_no")) entry["role"] = get_entry_or_none(name, "@role") entry["first_name"] = get_entry_or_none(name, "first_name") entry["last_name"] = get_entry_or_none(name, "last_name") entry["wos_standard"] = get_entry_or_none(name, "wos_standard") entry["daisng_id"] = get_entry_or_none(name, "@daisng_id") entry["full_name"] = get_entry_or_none(name, "full_name") # Get around errors / booby traps for name retrieval first_name = entry["first_name"] last_name = entry["last_name"] full_name = f"{first_name} {last_name}" entry["orcid"] = attrib.get_orcid(full_name) entry["r_id"] = attrib.get_r_id(full_name) field.append(entry) except: pass return field @staticmethod def get_languages(data: dict) -> List[Dict[str, str]]: """Extract language fields. :param data: dictionary of web response. :return: List of language records. 
""" lang_list = list() try: data["static_data"]["fullrecord_metadata"]["languages"] except: return lang_list languages = get_as_list(data["static_data"]["fullrecord_metadata"]["languages"], "language") for entry in languages: lang_list.append({"type": entry["@type"], "name": entry["#text"]}) return lang_list @staticmethod def get_refcount(data: dict) -> Union[int, None]: """Extract reference count. :param data: dictionary of web response. :return: Reference count. """ try: refcount = int(data["static_data"]["fullrecord_metadata"]["refs"]["@count"]) return refcount except: return None @staticmethod def get_abstract(data: dict) -> List[str]: """Extract abstracts. :param data: dictionary of web response. :return: List of abstracts. """ abstract_list = list() try: abstracts = get_as_list(data["static_data"]["fullrecord_metadata"]["abstracts"], "abstract") for abstract in abstracts: texts = get_as_list(abstract["abstract_text"], "p") for text in texts: abstract_list.append(text) except: pass return abstract_list @staticmethod def get_keyword(data: dict) -> List[str]: """Extract keywords. Will also get the keywords from keyword plus if they are available. :param data: dictionary of web response. :return: List of keywords. """ keywords = list() try: keywords = get_as_list(data["static_data"]["fullrecord_metadata"]["keywords"], "keyword") if "item" in data["static_data"] and "keywords_plus" in data["static_data"]["item"]: plus = get_as_list(data["static_data"]["item"]["keywords_plus"], "keyword") keywords = keywords + plus except: pass return keywords @staticmethod def get_conference(data: dict) -> List[Dict[str, Any]]: """Extract conference information. :param data: dictionary of web response. :return: List of conferences. """ conferences = list() try: conf_list = get_as_list(data["static_data"]["summary"]["conferences"], "conference") for conf in conf_list: conference = dict() conference["id"] = get_entry_or_none(conf, "@conf_id") if conference["id"] is not None: conference["id"] = int(conference["id"]) conference["name"] = None if "conf_titles" in conf and "conf_title" in conf["conf_titles"]: titles = get_as_list(conf["conf_titles"], "conf_title") conference["name"] = titles[0] conferences.append(conference) except: pass return conferences @staticmethod def get_orgs(data: dict) -> list: """Extract the organisation information. :param data: dictionary of web response. 
:return: list of organisations or None """ orgs = list() try: addr_list = get_as_list(data["static_data"]["fullrecord_metadata"]["addresses"], "address_name") except: return orgs for addr in addr_list: spec = addr["address_spec"] org = dict() org["city"] = get_entry_or_none(spec, "city") org["state"] = get_entry_or_none(spec, "state") org["country"] = get_entry_or_none(spec, "country") if "organizations" not in addr["address_spec"]: orgs.append(org) return orgs org_list = get_as_list(addr["address_spec"]["organizations"], "organization") org["org_name"] = org_list[0] if len(org_list) > 0 else None for entry in org_list: if isinstance(entry, dict) and "@pref" in entry and entry["@pref"] == "Y": org["org_name"] = entry["#text"] break if "suborganizations" in addr["address_spec"]: org["suborgs"] = get_as_list(addr["address_spec"]["suborganizations"], "suborganization") if "names" in addr and "name" in addr["names"]: names = get_as_list(addr["names"], "name") names_list = list() for name in names: entry = dict() entry["first_name"] = get_entry_or_none(name, "first_name") entry["last_name"] = get_entry_or_none(name, "last_name") entry["daisng_id"] = get_entry_or_none(name, "@daisng_id") entry["full_name"] = get_entry_or_none(name, "full_name") entry["wos_standard"] = get_entry_or_none(name, "wos_standard") names_list.append(entry) org["names"] = names_list orgs.append(org) return orgs @staticmethod def get_fund_ack(data: dict) -> dict: """Extract funding acknowledgements. :param data: dictionary of web response. :return: Funding acknowledgement information. """ fund_ack = dict() fund_ack["text"] = list() fund_ack["grants"] = list() try: entry = data["static_data"]["fullrecord_metadata"]["fund_ack"] if "fund_text" in entry and "p" in entry["fund_text"]: fund_ack["text"] = get_as_list(entry["fund_text"], "p") grants = get_as_list(entry["grants"], "grant") except: return fund_ack for grant in grants: grant_info = dict() grant_info["agency"] = get_entry_or_none(grant, "grant_agency") grant_info["ids"] = list() if "grant_ids" in grant: grant_info["ids"] = get_as_list(grant["grant_ids"], "grant_id") fund_ack["grants"].append(grant_info) return fund_ack @staticmethod def get_categories(data: dict) -> dict: """Extract categories. :param data: dictionary of web response. :return: categories dictionary. """ category_info = dict() try: entry = data["static_data"]["fullrecord_metadata"]["category_info"] except: return category_info entry = data["static_data"]["fullrecord_metadata"]["category_info"] category_info["headings"] = get_as_list_or_none(entry, "headings", "heading") category_info["subheadings"] = get_as_list_or_none(entry, "subheadings", "subheading") subject_list = list() subjects = get_as_list_or_none(entry, "subjects", "subject") for subject in subjects: subject_dict = dict() subject_dict["ascatype"] = get_entry_or_none(subject, "@ascatype") subject_dict["code"] = get_entry_or_none(subject, "@code") subject_dict["text"] = get_entry_or_none(subject, "#text") subject_list.append(subject_dict) category_info["subjects"] = subject_list return category_info @staticmethod def parse_json(*, data: dict, harvest_datetime: str, release_date: str, institution_ids: List[str]) -> dict: """Turn json data into db schema format. :param data: dictionary of web response. :param harvest_datetime: isoformat string of time the fetch took place. :param release_date: Dataset release date. :param institution_ids: List of institution ids used in the query. :return: dict of data in right field format. 
""" entry = dict() entry["harvest_datetime"] = harvest_datetime entry["release_date"] = release_date entry["identifiers"] = WosJsonParser.get_identifiers(data) entry["pub_info"] = WosJsonParser.get_pub_info(data) entry["title"] = WosJsonParser.get_title(data) entry["names"] = WosJsonParser.get_names(data) entry["languages"] = WosJsonParser.get_languages(data) entry["ref_count"] = WosJsonParser.get_refcount(data) entry["abstract"] = WosJsonParser.get_abstract(data) entry["keywords"] = WosJsonParser.get_keyword(data) entry["conferences"] = WosJsonParser.get_conference(data) entry["fund_ack"] = WosJsonParser.get_fund_ack(data) entry["categories"] = WosJsonParser.get_categories(data) entry["orgs"] = WosJsonParser.get_orgs(data) entry["institution_ids"] = institution_ids return entry class WebOfScienceRelease(SnapshotRelease): API_URL = "http://scientific.thomsonreuters.com" EXPECTED_SCHEMA = "http://scientific.thomsonreuters.com/schema/wok5.4/public/FullRecord" def __init__( self, *, dag_id: str, release_date: pendulum.DateTime, login: str, password: str, institution_ids: List[str], earliest_date: pendulum.DateTime, ): """Construct an UnpaywallSnapshotRelease instance. :param dag_id: The DAG ID. :param release_date: Release date. :param login: WoS login. :param password: <PASSWORD>. :param institution_ids: List of institution IDs to query. :param earliest_date: Earliest date to query from. """ super().__init__( dag_id=dag_id, release_date=release_date, ) self.table_id = WebOfScienceTelescope.DAG_ID self.login = login self.password = password self.institution_ids = institution_ids self.earliest_date = earliest_date def download(self): """Download a Web of Science live snapshot.""" self.harvest_datetime = pendulum.now("UTC") schedule = build_schedule(self.earliest_date, self.release_date) WosUtility.download_wos_parallel( login=self.login, password=<PASSWORD>, schedule=schedule, conn=self.dag_id, institution_ids=self.institution_ids, download_dir=self.download_folder, ) def transform(self): """Convert the XML response into BQ friendly jsonlines.""" for xml_file in self.download_files: records = self._transform_xml_to_json(xml_file) harvest_datetime = self._get_harvest_datetime(xml_file) entries = self._transform_to_db_format(records=records, harvest_datetime=harvest_datetime) self._write_transform_files(entries=entries, xml_file=xml_file) def _schema_check(self, schema: str): """Check that the schema hasn't changed. Throw on different schema. :param schema: Schema string from HTTP response. """ if schema != WebOfScienceRelease.EXPECTED_SCHEMA: raise AirflowException( f"Schema change detected. Expected: {WebOfScienceRelease.EXPECTED_SCHEMA}, received: {schema}" ) def _get_harvest_datetime(self, filepath: str) -> str: """Get the harvest datetime from the filename. <startdate>_<enddate>_<page>_<timestamp>.xml :param filepath: XML file path. :return: Harvest datetime string. """ filename = os.path.basename(filepath) file_tokens = filename.split("_") return file_tokens[3][:-4] def _transform_xml_to_json(self, xml_file: str) -> Union[dict, list]: """Transform XML response to JSON. Throw if schema has changed. :param xml_file: XML file of the API response. :return: Converted dict or list of the response. """ xml_data = load_file(xml_file) records, schema = WosUtility.parse_query(xml_data) self._schema_check(schema) return records def _transform_to_db_format(self, records: list, harvest_datetime: str) -> List[dict]: """Convert the json response to the expected schema. 
:param records: List of the records as json. :param harvest_datetime: Timestamp of when the API call was made. :return:
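# A minimal, self-contained sketch of the jsonlines-writing step that transform()
# delegates to _write_transform_files (whose body is not shown here).  The helper
# name and the .jsonl naming scheme below are assumptions for illustration only;
# the real telescope implementation may differ.
import json
import os
from typing import List


def write_jsonlines(entries: List[dict], xml_file: str, output_dir: str) -> str:
    """Write one JSON object per line, the format BigQuery load jobs expect."""
    out_path = os.path.join(output_dir, os.path.basename(xml_file)[:-4] + ".jsonl")
    with open(out_path, "w") as f:
        for entry in entries:
            f.write(json.dumps(entry) + "\n")
    return out_path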
'@odata.type': tval = item["@odata.type"].split('#') tval = tval[-1].split('.')[:-1] tval = '.'.join(tval) instances.append(tval) elif item: instances.append(item[instance._typestring]) return instances def getprops(self, selector=None, props=None, nocontent=None, \ skipnonsetting=True, remread=False, insts=None): """Gets properties from a specified selector. If no selector is specified, uses the selector property in the app class. Instead of a selector a list of instances to search can be used instead. If both **selector** and **insts** are passed, **insts** is used. Specific values for multi-level dictionaries can be returned by passing each key separated by a "/" Ex: Key/Sub-Key/Sub-Sub-Key :param selector: The type selection for the get operation. :type selector: str :param skipnonsetting: Flag to remove non settings path. :type skipnonsetting: bool :param nocontent: Keys not found are added to the list provided. :type nocontent: list :param remread: Flag to remove readonly properties. :type remread: bool :param props: The keys to search for within current selection. :type props: list :param insts: List of RisMonolithMemberv100 to be searched for specific keys. :type insts: list :returns: A list of properties found in dictionary form. :rtype: list """ results = list() nocontent = set() if nocontent is None else nocontent if props: noprop = {prop:False for prop in props} if props else {} instances = insts if insts else self._getinstances(selector=selector) instances = skipnonsettingsinst(instances) if skipnonsetting else instances if not instances or len(instances) == 0: raise NothingSelectedError() for instance in instances: currdict = instance.dict for patch in instance.patches: currdict = jsonpatch.apply_patch(currdict, patch) _ = self.removereadonlyprops(currdict, emptyraise=True) if remread else None temp_dict = dict() if props: if isinstance(props, six.string_types): props = [props] for prop in props: copydict = copy.deepcopy(currdict) propsdict = navigatejson(prop.split('/'), copydict) if propsdict is None: continue noprop[prop] = True merge_dict(temp_dict, propsdict) if temp_dict: results.append(temp_dict) else: results.append(currdict) if props: _ = [nocontent.add(prop) for prop in props if not noprop[prop]] return results def info(self, selector=None, props=None, dumpjson=True, latestschema=False): """Gets schema information for properties from a specified selector. If no selector is specified, uses the selector property in the app class. If no properties are specified the entire schema dictionary is returned in a list. :param selector: The type selection for the info operation. :type selector: str :param props: The keys to gather schema data for within current selection. :type props: str or list :param dumpjson: Flag to determine if output should be human readable or json schema. :type dumpjson: bool :param latestschema: Flag to determine if we should drop the schema version when we try to match schema information. If True, the version will be dropped. :type latestschema: bool :returns: A list of property schema information if dumpjson is True or string if dumpjson is False. 
:rtype: list or string """ model = None outdata = '' nokey = False results = None typestring = self.typepath.defs.typestring iloversion = self.getiloversion() if not iloversion: return results instances = self._getinstances(selector) attributeregistry = getattributeregistry(instances) instances = skipnonsettingsinst(instances) if not instances or len(instances) == 0: raise NothingSelectedError() for inst in instances: bsmodel = None currdict = inst.resp.dict proppath = inst.resp.getheader('Link').split(';')[0].strip('<>') \ if inst.resp.getheader('Link') else None seldict = {} if not props: model, bsmodel = self.get_model(currdict, attributeregistry, \ latestschema, proppath=proppath) results = model break if isinstance(props, six.string_types): props = props.split('/') if '/' in props else props props = [props] if not isinstance(props, (list, tuple)) else props seldict = navigatejson(props, copy.deepcopy(currdict)) if seldict is None: nokey = True continue if self.typepath.defs.typestring in currdict: seldict[typestring] = currdict[typestring] model, bsmodel = self.get_model(currdict, \ attributeregistry, latestschema, newarg=props[:-1], \ proppath=proppath) if not model and not bsmodel: errmsg = "/".join(props) warning_handler("Unable to locate registry model or "\ "No data available for entry: {}\n".format(errmsg)) continue found = model.get_validator(props[-1]) if model else None found = bsmodel.get_validator(props[-1]) if not found and bsmodel else found outdata = found if found and dumpjson else \ found.print_help(props[-1]) if found else outdata if outdata or results: return outdata if outdata else results errmsg = "Entry {} not found in current selection\n".format("/".\ join(props)) if nokey else "Entry {} not found in current"\ " selection\n".format("/".join(props)) warning_handler(errmsg) def loadset(self, seldict=None, fltrvals=(None, None), diffonly=False,\ latestschema=False, uniqueoverride=False, selector=None): """Creates json patches in monolith if the supplied dictionary passes schema validation. In the event schemas are unavailable the patches are always added. Patches that are created this way are not sent to the server until the :meth:`commit` function is called, sending the patches to the server. A list of patches that have not been sent to the server can be returned with the :meth:`status` function. :param selector: The type selection for the loadset operation. :type selector: str :param seldict: Dictionary with the patches to apply to the selected instances. :type seldict: dict :param fltrvals: The filter values for the operation (Key,Val). If a selector returns multiple instances fltrvals can filter the instances by a key/value pair, limiting the returned instances to the one you want. If no filter is supplied the patch dictionary will be applied to all instances. :type fltrvals: tuple :param latestschema: Flag to determine if we should drop the schema version when we try to match schema information. If True, the version will be dropped. :type latestschema: bool :param diffonly: flag to differentiate only existing properties. :type diffonly: bool :param uniqueoverride: Flag to determine if system unique properties should also be patched. If this is True, then unique properties will be patched. 
:type uniqueoverride: bool :returns: returns a list of properties that have successfully been set """ results = list() nochangesmade = False settingskipped = [False] selector = self.selector if not selector else selector instances = self.select(selector=selector, fltrvals=fltrvals) attributeregistry = getattributeregistry(instances=instances) instances = skipnonsettingsinst(instances=instances) if not instances or len(instances) == 0: raise NothingSelectedSetError() for instance in instances: if validate_headers(instance, verbose=self.verbose): continue else: nochangesmade = True currdict = instance.resp.dict diff_resp = diffdict(newdict=copy.deepcopy(seldict),\ oridict=copy.deepcopy(currdict), settingskipped=settingskipped) iloversion = self.getiloversion() if iloversion: proppath = instance.resp.getheader('Link').split(';')[0].\ strip('<>') if instance.resp.getheader('Link') \ else None try: self._validatechanges(instance=instance, attributeregistry=attributeregistry,\ newdict=diff_resp, oridict=currdict, \ unique=uniqueoverride, latestschema=latestschema, \ proppath=proppath) except SchemaValidationError: LOGGER.error("Cannot validate changes, error found in schema.") patches = jsonpatch.make_patch(currdict, diff_resp) if patches: torem = [] _ = [torem.append(patch) for patch in patches.patch if patch["op"] == "remove"] _ = [patches.patch.remove(patch) for patch in torem] for ind, item in enumerate(instance.patches): ppath = item.patch[0]["path"] if hasattr(item, "patch") else item[0]["path"] # ppath = ["path"](getattr(item, "patch"), item)[0]["path"] jpath = jsonpointer.JsonPointer(ppath.lower()) jval = jpath.resolve(seldict, default='kasjdk?!') if not jval == 'kasjdk?!': del instance.patches[ind] if patches: for patch in patches.patch: forprint = patch["value"] if "value" in patch\ else (patch["op"] + " " + patch["from"]) results.append({patch["path"][1:]:forprint}) self.monolith.path(instance.path).patches.append(patches) else: nochangesmade = True if not nochangesmade: return results elif settingskipped[0] is True: raise LoadSkipSettingError() else: return results def status(self): """Returns all pending changes that have not been committed yet.""" iloversion = self.getiloversion() finalresults = list() monolith = self.monolith (_, _) = self.get_selection(setenable=True) attrreg = getattributeregistry([ele for ele in monolith.iter() if ele]) for instance in monolith.iter(): results = list() if not(instance.patches and len(instance.patches) > 0): continue for item in instance.patches: if isinstance(item, list): results.extend(jsonpatch.JsonPatch(item)) else: results.extend(item) currdict = instance.resp.dict itemholder = list() for mainitem in results: item = copy.deepcopy(mainitem) if iloversion: _, bsmodel = self.get_model(currdict, attrreg) if bsmodel: prop = item["path"][1:].split('/')[-1] validator = bsmodel.get_validator(prop) if validator: if isinstance(validator, redfish.ris.\ validation.PasswordValidator): item["value"] = "******" itemholder.append(item) if itemholder: finalresults.append({instance.maj_type+'('+instance.path+')': itemholder}) return finalresults def commit(self): """Applies all pending json patches to the server. :yields: Two strings. 1. Path being PATCHed 2. True if an error occurred during the PATCH, False if no error. 
""" instances = [inst for inst in self.monolith.iter() if inst.patches] if not instances or len(instances) == 0: raise NothingSelectedError() for instance in instances: if validate_headers(instance, verbose=self.verbose): continue currdict = dict() oridict = instance.resp.dict totpayload = dict() # apply patches to represent current edits for patches in instance.patches: if self._iloversion < 5.130: self._checkforetagchange(instance=instance) fulldict = jsonpatch.apply_patch(oridict, patches) for patch in patches: currdict = copy.deepcopy(fulldict) patchpath = patch["path"] pobj = jsonpointer.JsonPointer(patchpath) indpayloadcount = 0 for item in pobj.parts: payload = pobj.walk(currdict, item) indpayloadcount =
<reponame>Alex-Lian/openctest<filename>core/generate_value/gutil.py import re import random import config import sys NONE = "NOTYPE" INT = "INT" FLOAT = "FLOAT" BOOL = "BOOL" FILEPATH = "FILEPATH" IP = "IP" PORT = "PORT" IPPORT = "IPPORT" CLASSNAME = "CLASSNAME" DIRPATH = "DIRPATH" INTLIST = "INTLIST" STRLIST = "STRLIST" TIME = "TIME" DATA = "DATA" PM = "PM" PC = "PC" ZKDIR = "ZKDIR" ZKPORT = "ZKPORT" ZKPORTADDRESS = "ZKPORTADDRESS" ZKLIMIT = "ZKLIMIT" ZKSIZE = "ZKSIZE" ALGO = "ALGORITHM" USER = "USER" GROUP = "GROUP" NAMESERVICES = "NAMESERVICES" INTERFACE = "INTERFACE" POTENTIALFLOAT = "POTENTIALFLOAT" timeunits = ["ms", "millisecond", "s", "sec", "second", "m", "min", "minute", "h", "hr", "hour", "d", "day"] datasize = ["MB"] ALPHABETS = [ "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", ] # guess from value def isBool(s): if s.lower() == "true" or s.lower() == "false": return True else: return False def isPort(name, value): if value == "" and name.endswith(".port"): return True if isInt(value) and name.endswith(".port"): return True return False def isPermissionMask(name, value): if len(value) == 3 and "umask" in name: try: _ = int("0o" + value, base=8) return True except ValueError: return False def isPermissionCode(s): if len(s) == 9: m = re.match(r"^[rwx]+$", s) if m: return True return False def isInt(s): try: _ = int(s) return True except ValueError: return False def isFloat(s): m = re.match(r"^\d+\.\d+[fF]$", s) if m: s = s[:-1] try: _ = float(s) return True except ValueError: return False def isIpAddr(s): m = re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", s) return m is not None def isIpPortAddr(s): m = re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d+$", s) return m is not None def isClassName(s): return s.startswith("org.apache.hadoop") or s.startswith("alluxio.") def isFilePath(s): # extend, ${} and "/" in dvalue if re.match(r"\$\{.*\}", s) and "/" in s: return True elif s.startswith("/"): return True else: return def isIntList(s): elements = s.split(",") res = True for ele in elements: res &= isInt(ele) return res def isStringList(s): return s.count(",") > 0 def isTime(s): for unit in timeunits: if s.endswith(unit): t = s[:s.find(unit)] if isInt(t): return True return False def isDataSize(s): for unit in datasize: if s.endswith(unit): t = s[:s.find(unit)] if isInt(t): return True return False def isAlgorithm(s): return s.endswith(".algorithm") # guess from name def isFilePath2(name): return name.endswith(".conf") or name.endswith('.path') def isFilePath3(name): return name.endswith(".file") or name.endswith(".file.name") or name.endswith("keytab") def isDirPath(name): return name.endswith(".dir") def isAddr(name): return name.endswith(".addr") or name.endswith(".addresses") or name.endswith(".hostname") or name.endswith("address") def isClassName2(name): return name.endswith(".class") or name.endswith(".classes") def isUser(name): return name.endswith("user") or name.endswith("users") def isGroup(name): return name.endswith("group") or name.endswith("groups") def isNameservices(name): return name.endswith("nameservices") def isInterface(name): return name.endswith("interface") or name.endswith("interfaces") def isPotentialFloat(name): return name.endswith("limit") or name.endswith("size") # guess from semantics def isFilePath4(semantics): return "relative path" in 
semantics or "directory" in semantics or "folder" in semantics def genBool(param): upcnt = 0 lowcnt = 0 for char in param.dvalue: if char.isupper(): upcnt += 1 elif char.islower(): lowcnt += 1 ret = "True" if param.dvalue.lower() == "true": ret = "False" elif param.dvalue.lower() == "false": ret = "True" if upcnt == 0: return [ret.lower()] elif lowcnt == 0: return [ret.upper()] else: return [ret] def genPermissionMask(param): return config.PERMISSIONMASKS def genPermissionCode(param): return config.PERMISSIONCODES def genInt(param): val = int(param.dvalue) sign = 1 if val < 0: sign = -1 val = -1 * val if val == 1: return [0, sign*2] elif val == 0: return [1, -1] else: if val <= 10: return [sign*1, sign*2*val] else: return [sign*val//2, sign*val*2] def genIntList(param): vals = param.dvalue.split(",") l1 = [] l2 = [] for val in vals: l1.append(int(val)//2) l2.append(int(val)*2) return [l1, l2] def genStringList(param): vals = param.dvalue.split(",") # /, ; assert len(vals) >= 2 return [vals[0], vals[1]] def genFloat(param): s = param.dvalue m = re.match(r"^\d+\.\d+[fF]$", s) if m: s = s[:-1] val = float(s) if val == 0.0: return [1.0, -1.0] else: return [val/2, val*2] def genPort(param): return config.PORTS def genIPPort(param): s = param.dvalue s = s[:s.find(":")] return [s + ":" + str(config.PORTS[0]), s + ":" + str(config.PORTS[1])] def genIP(param): return config.IPS def genFilePath(param): return config.FILEPATHS def genDirPath(param): return config.DIRPATHS def genTime(param): s = param.dvalue for unit in timeunits: if s.endswith(unit): t = s[:s.find(unit)] if isInt(t): t = int(t) if t == 0: return ["1" + unit, "2" + unit] elif t == 1: return ["10" + unit, "2" + unit] return ["1" + unit, str(2*t) + unit] def genData(param): s = param.dvalue for unit in datasize: if s.endswith(unit): t = s[:s.find(unit)] if isInt(t): t = int(t) if t == 0: return ["1" + unit, "2" + unit] elif t == 1: return ["10" + unit, "2" + unit] return ["1" + unit, str(2*t) + unit] def genUser(param): return config.USERS def genGroup(param): return config.GROUPS def genNameservices(param): return config.NAMESERVICES def genInterface(param): return config.INTERFACES def genAlgorithm(param): return semanticExtractionNoType(param) def genPotentialFloat(param): return [0.1, 0.5] def semanticExtractionClassName(param): # strategies # replace "/" in semantics with " " semantics = param.description + " " # extract words after key phrases from semantics arrs = [[], [], []] for phrase in config.key_phrases_plural: if phrase in semantics: parts = semantics.split(phrase) raw = parts[1].split(".")[0] raw = raw.replace(",", " ") raw = raw.replace(" and ", " ") raw = raw.replace(" or ", " ") raw = raw.strip() arrs[0] = raw.split() break for phrase in config.key_phrases_singular: if phrase in semantics: parts = semantics.split(phrase) tmp = parts[1].split(".")[0] tmp = tmp.strip() arrs[1] = [tmp] break # select ,from arr1, arr2 the one containing least non word characters # break tie by selecting the one with more values other than SKIP nonword = re.compile('\W') selected = 0 mincnt = sys.maxsize for idx, arr in enumerate(arrs): match = nonword.findall("".join(arr)) match = [x != "," for x in match] if mincnt > len(match): selected = idx mincnt = len(match) elif mincnt == len(match): if len(arrs[selected]) < len(arr): selected = idx arr = [] hasCapital = False for char in param.dvalue: if char.isupper(): hasCapital = True break for word in arrs[selected]: if word == param.dvalue: continue elif hasCapital: for char in word: if 
char.isupper(): arr.append(word) break if len(arr) != 0: return arr[0:2] return [] def semanticExtractionNoType(param): # strategies # replace "/" in semantics with " " semantics = param.description + " " arrs = [[], [], []] for phrase in config.key_phrases_plural: if phrase in semantics: parts = semantics.split(phrase) raw = parts[1].split(".")[0] if "." not in parts[1] and len(parts) == 2: raw = parts[1] raw = raw.replace(",", " ") raw = raw.replace(":", " ") raw = raw.replace(" and ", " ") raw = raw.replace(" or ", " ") raw = raw.strip() arrs[0] = raw.split() break for phrase in config.key_phrases_singular: if phrase in semantics: parts = semantics.split(phrase) tmp = parts[1].split(".")[0] tmp = tmp.strip() arrs[1] = [tmp] break # select ,from arr1, arr2 the one containing least non word characters # break tie by selecting the one with more values other than SKIP nonword = re.compile('\W') selected = 0 mincnt = sys.maxsize for idx, arr in enumerate(arrs): match = nonword.findall("".join(arr)) match = [x != "," for x in match] if mincnt > len(match): selected = idx mincnt = len(match) elif mincnt == len(match): if len(arrs[selected]) < len(arr): selected = idx arr = [] hasCapital = False for char in param.dvalue: if char.isupper(): hasCapital = True break for word in arrs[selected]: if word == param.dvalue: continue elif hasCapital: for char in word: if char.isupper(): arr.append(word) break else: allLower = True for char in word: if not char.islower(): allLower = False if allLower: arr.append(word) if len(arr) != 0: return arr[0:2] # map out all capital words tmpWord = "" skipChars = [] specialChars = [] arr = [] for char in param.dvalue: if char not in ALPHABETS: skipChars.append(char) for char in semantics: if char.isupper() or char in skipChars: tmpWord += char elif char != " " and char not in ALPHABETS: tmpWord += char specialChars.append(char) elif len(tmpWord) > 2: if tmpWord[0] not in ALPHABETS: tmpWord = tmpWord[1:] if tmpWord[-1] not in ALPHABETS: tmpWord = tmpWord[0:-1] if tmpWord != param.dvalue: for schar in specialChars: if schar in tmpWord: tmpWord = "" if tmpWord != "": arr.append(tmpWord) tmpWord = "" else: tmpWord = "" capcnt = 0 for char in param.dvalue: if char.isupper() or char in skipChars: capcnt += 1 if capcnt != len(param.dvalue) or
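# Usage sketch for the value-generation helpers above.  The configuration
# parameter is modelled here as a simple namespace with a `dvalue` (default
# value) attribute, which is all that genInt/genTime/genBool read; the real
# parameter object in openctest may carry more fields (e.g. `description`).
from types import SimpleNamespace

int_param = SimpleNamespace(dvalue="40")
time_param = SimpleNamespace(dvalue="30s")
bool_param = SimpleNamespace(dvalue="true")

print(genInt(int_param))    # [20, 80]       -- halve and double the default
print(genTime(time_param))  # ['1s', '60s']  -- keep the unit, scale the number
print(genBool(bool_param))  # ['false']      -- flip the boolean, preserving case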
import argparse import hashlib import os import pprint import re import shutil import k3down2 import k3git import yaml from k3color import darkyellow from k3color import green from k3handy import cmdpass from k3handy import pjoin from k3handy import to_bytes from k3fs import fread from .. import mistune def sj(*args): return ''.join([str(x) for x in args]) def msg(*args): print('>', ''.join([str(x) for x in args])) def indent(line): if line == '': return '' return ' ' + line def escape(s, quote=True): s = s.replace("&", "&amp;") s = s.replace("<", "&lt;") s = s.replace(">", "&gt;") if quote: s = s.replace('"', "&quot;") return s def add_paragraph_end(lines): # add blank line to a paragraph block if lines[-1] == '': return lines lines.append('') return lines def strip_paragraph_end(lines): # remove last blank lines if lines[-1] == '': return strip_paragraph_end(lines[:-1]) return lines def code_join(n): lang = n['info'] or '' txt = '\n'.join(['```' + lang] + n['text'][:-1].split('\n') + ['```', '']) return txt def block_code_to_jpg(mdrender, n, width=None, ctx=None): txt = code_join(n) w = width if w is None: w = mdrender.conf.code_width return typ_text_to_jpg(mdrender, 'code', txt, opt={'html': {'width': w}}) def block_code_to_fixwidth_jpg(mdrender, n, ctx=None): return block_code_to_jpg(mdrender, n, width=600, ctx=ctx) def block_code_mermaid_to_jpg(mdrender, n, ctx=None): return typ_text_to_jpg(mdrender, 'mermaid', n['text']) def block_code_graphviz_to_jpg(mdrender, n, ctx=None): return typ_text_to_jpg(mdrender, 'graphviz', n['text']) def typ_text_to_jpg(mdrender, typ, txt, opt=None): d = k3down2.convert(typ, txt, 'jpg', opt=opt) fn = asset_fn(txt, 'jpg') fwrite(mdrender.conf.asset_output_dir, fn, d) return [r'![]({})'.format(mdrender.conf.img_url(fn)), ''] def math_block_to_imgtag(mdrender, n, ctx=None): return [k3down2.convert('tex_block', n['text'], 'imgtag')] def math_inline_to_imgtag(mdrender, n, ctx=None): return [k3down2.convert('tex_inline', n['text'], 'imgtag')] def math_block_to_jpg(mdrender, n, ctx=None): return typ_text_to_jpg(mdrender, 'tex_block', n['text']) def math_inline_to_jpg(mdrender, n, ctx=None): return typ_text_to_jpg(mdrender, 'tex_inline', n['text']) def math_inline_to_plaintext(mdrender, n, ctx=None): return [escape(k3down2.convert('tex_inline', n['text'], 'plain'))] def to_plaintext(mdrender, n, ctx=None): return [escape(n['text'])] def table_to_barehtml(mdrender, n, ctx=None): # create a markdown render to recursively deal with images etc. mdr = MDRender(mdrender.conf, platform=importer) md = mdr.render_node(n) md = '\n'.join(md) tablehtml = k3down2.convert('table', md, 'html') return [tablehtml, ''] def table_to_jpg(mdrender, n, ctx=None): mdr = MDRender(mdrender.conf, platform='') md = mdr.render_node(n) md = '\n'.join(md) md_base_path = os.path.split(mdrender.conf.src_path)[0] return typ_text_to_jpg(mdrender, 'md', md, opt={'html': { 'asset_base': os.path.abspath(md_base_path), }}) def importer(mdrender, n, ctx=None): ''' Importer is only used to copy local image to output dir and update image urls. This is used to deal with partial renderers, e.g., table_to_barehtml, which is not handled by univertial image importer, but need to import the image when rendering a table with images. 
''' typ = n['type'] if typ == 'image': return image_local_to_remote(mdrender, n, ctx=ctx) return None def zhihu_specific(mdrender, n, ctx=None): return render_with_features(mdrender, n, ctx=ctx, features=zhihu_features) def minimal_mistake_specific(mdrender, n, ctx=None): return render_with_features(mdrender, n, ctx=ctx, features=minimal_mistake_features) def wechat_specific(mdrender, n, ctx=None): return render_with_features(mdrender, n, ctx=ctx, features=wechat_features) def weibo_specific(mdrender, n, ctx=None): typ = n['type'] if typ == 'image': return image_local_to_remote(mdrender, n, ctx=ctx) if typ == 'math_block': return math_block_to_imgtag(mdrender, n, ctx=ctx) if typ == 'math_inline': return math_inline_to_plaintext(mdrender, n, ctx=ctx) if typ == 'table': return table_to_jpg(mdrender, n, ctx=ctx) if typ == 'codespan': return [escape(n['text'])] # weibo does not support pasting <p> in <li> if typ == 'list': lines = [] lines.extend(mdrender.render(n['children'])) lines.append('') return lines if typ == 'list_item': lines = [] lines.extend(mdrender.render(n['children'])) lines.append('') return lines if typ == 'block_quote': lines = mdrender.render(n['children']) lines = strip_paragraph_end(lines) return lines if typ == 'block_code': lang = n['info'] or '' if lang == 'mermaid': return block_code_mermaid_to_jpg(mdrender, n, ctx=ctx) if lang == 'graphviz': return block_code_graphviz_to_jpg(mdrender, n, ctx=ctx) if lang == '': return block_code_to_jpg(mdrender, n, ctx=ctx) else: return block_code_to_jpg(mdrender, n, width=600, ctx=ctx) return None def simple_specific(mdrender, n, ctx=None): return render_with_features(mdrender, n, ctx=ctx, features=simple_features) class MDRender(object): # platform specific renderer platforms = { 'zhihu': zhihu_specific, 'wechat': wechat_specific, 'weibo': weibo_specific, 'minimal_mistake': minimal_mistake_specific, 'simple': simple_specific, } def __init__(self, conf, platform='zhihu'): self.conf = conf if isinstance(platform, str): self.handlers = self.platforms.get(platform, lambda *x, **y: None) else: self.handlers = platform def render_node(self, n, ctx=None): """ Render a AST node into lines of text """ typ = n['type'] # customized renderers: lines = self.handlers(self, n, ctx=ctx) if lines is not None: return lines else: # can not render, continue with default handler pass # default renderers: if typ == 'thematic_break': return ['---', ''] if typ == 'paragraph': lines = self.render(n['children']) return ''.join(lines).split('\n') + [''] if typ == 'text': return [n['text']] if typ == 'strong': lines = self.render(n['children']) lines[0] = '**' + lines[0] lines[-1] = lines[-1] + '**' return lines if typ == 'math_block': return ['$$', n['text'], '$$'] if typ == 'math_inline': return ['$$ ' + n['text'].strip() + ' $$'] if typ == 'table': return self.render(n['children']) + [''] if typ == 'table_head': alignmap = { 'left': ':--', 'right': '--:', 'center': ':-:', None: '---', } lines = self.render(n['children']) aligns = [alignmap[x['align']] for x in n['children']] aligns = '| ' + ' | '.join(aligns) + ' |' return ['| ' + ' | '.join(lines) + ' |', aligns] if typ == 'table_cell': lines = self.render(n['children']) return [''.join(lines)] if typ == 'table_body': return self.render(n['children']) if typ == 'table_row': lines = self.render(n['children']) return ['| ' + ' | '.join(lines) + ' |'] if typ == 'block_code': # remove the last \n return ['```' + (n['info'] or '')] + n['text'][:-1].split('\n') + ['```', ''] if typ == 'codespan': return [('`' + 
n['text'] + '`')] if typ == 'image': if n['title'] is None: return ['![{alt}]({src})'.format(**n)] else: return ['![{alt}]({src} {title})'.format(**n)] if typ == 'list': head = '- ' if n['ordered']: head = '1. ' lines = self.render(n['children'], head) return add_paragraph_end(lines) if typ == 'list_item': lines = self.render(n['children']) # ctx is head passed from list lines[0] = ctx + lines[0] lines = lines[0:1] + [indent(x) for x in lines[1:]] return lines if typ == 'block_text': lines = self.render(n['children']) return ''.join(lines).split('\n') if typ == 'block_quote': lines = self.render(n['children']) lines = strip_paragraph_end(lines) lines = ['> ' + x for x in lines] return lines + [''] if typ == 'newline': return [''] if typ == 'block_html': return add_paragraph_end([n['text']]) if typ == 'link': # TODO title lines = self.render(n['children']) lines[0] = '[' + lines[0] lines[-1] = lines[-1] + '](' + n['link'] + ')' return lines if typ == 'heading': lines = self.render(n['children']) lines[0] = '#' * n['level'] + ' ' + lines[0] return lines + [''] if typ == 'strikethrough': lines = self.render(n['children']) lines[0] = '~~' + lines[0] lines[-1] = lines[-1] + '~~' return lines if typ == 'emphasis': lines = self.render(n['children']) lines[0] = '*' + lines[0] lines[-1] = lines[-1] + '*' return lines if typ == 'inline_html': return [n['text']] if typ == 'linebreak': return [" \n"] print(typ, n.keys()) pprint.pprint(n) return ['***:' + typ] def render(self, nodes, ctx=None): rst = [] for n in nodes: rst.extend(self.render_node(n, ctx)) return rst def msg(self, *args): msg(*args) def fix_tables(nodes): """ mistune does not parse table in list item. We need to recursively fix it. """ for n in nodes: if 'children' in n: fix_tables(n['children']) if n['type'] == 'paragraph': children = n['children'] if len(children) == 0: continue c0 = children[0] if c0['type'] != 'text': continue txt = c0['text'] table_reg = r' {0,3}\|(.+)\n *\|( *[-:]+[-| :]*)\n((?: *\|.*(?:\n|$))*)\n*' match = re.match(table_reg, txt) if match: mdr = MDRender(None, platform='') partialmd = mdr.render(children) partialmd = ''.join(partialmd) parser = new_parser() new_children = parser(partialmd) n['children'] = new_children def join_math_block(nodes): """ A tex segment may spans several paragraph: $$ // paragraph 1 x = 5 // y = 3 // paragraph 2 $$ // This function finds out all such paragraph and merge them into a single one. """ for n in nodes: if 'children' in n: join_math_block(n['children']) join_math_text(nodes) def parse_math(nodes): """ Extract all math segment such as ``$$ ... $$`` from a text and build a math_block or math_inline node. """ children = [] for n in nodes: if 'children' in n: n['children'] = parse_math(n['children']) if n['type'] == 'text': new_children = extract_math(n) children.extend(new_children) else: children.append(n) return children def join_math_text(nodes): i = 0 while i < len(nodes) - 1: n1 = nodes[i] n2 = nodes[i + 1] if ('children' in n1 and 'children' in n2 and len(n1['children']) > 0 and len(n2['children']) > 0 and n1['children'][-1]['type'] == 'text' and n2['children'][0]['type'] == 'text' and '$$' in n1['children'][-1]['text']): has_dd = '$$' in n2['children'][0]['text'] n1['children'][-1]['text'] += '\n\n' + n2['children'][0]['text'] n1['children'].extend(n2['children'][1:]) nodes.pop(i + 1) if has_dd: i += 1 else: i += 1 inline_math = r'\$\$(.*?)\$\$' def extract_math(n): """ Extract ``$$ ... $$`` from a text node and build a new node.
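# Minimal sketch of how an AST like the one MDRender.render_node() consumes can
# be produced.  The project vendors its own copy of mistune, so the exact call
# may differ; with a stock mistune 2.x install, the 'ast' renderer yields the
# same {'type': ..., 'children': ...} token shape handled above.
import mistune

parse = mistune.create_markdown(renderer="ast")
ast = parse("# Title\n\nSome *emphasised* text.\n")

# Each token is a dict, e.g. {'type': 'heading', 'children': [...], 'level': 1}
for node in ast:
    print(node["type"])

# Rendering the AST back to markdown with the platform-neutral renderer, as
# fix_tables() does internally (the default handlers do not consult `conf`):
mdr = MDRender(None, platform="")
print("\n".join(mdr.render(ast)))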
= YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True) def _get_floating_ip_required(self): """ Getter method for floating_ip_required, mapped from YANG variable /nst/netslice_connection_point/floating_ip_required (boolean) YANG Description: Boolean parameter to indicate whether the CP must be exposed. A public IP address will be allocated to this CP if exposed is true. The default is false meaning a floating IP address is not required. It must be explicitly asked for a floating IP address to be allocated. """ return self.__floating_ip_required def _set_floating_ip_required(self, v, load=False): """ Setter method for floating_ip_required, mapped from YANG variable /nst/netslice_connection_point/floating_ip_required (boolean) If this variable is read-only (config: false) in the source YANG file, then _set_floating_ip_required is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_floating_ip_required() directly. YANG Description: Boolean parameter to indicate whether the CP must be exposed. A public IP address will be allocated to this CP if exposed is true. The default is false meaning a floating IP address is not required. It must be explicitly asked for a floating IP address to be allocated. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="floating-ip-required", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='boolean', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """floating_ip_required must be of a type compatible with boolean""", 'defined-type': "boolean", 'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="floating-ip-required", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='boolean', is_config=True)""", }) self.__floating_ip_required = t if hasattr(self, '_set'): self._set() def _unset_floating_ip_required(self): self.__floating_ip_required = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="floating-ip-required", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='boolean', is_config=True) def _get_netslice_vld_id_ref(self): """ Getter method for netslice_vld_id_ref, mapped from YANG variable /nst/netslice_connection_point/netslice_vld_id_ref (leafref) YANG Description: ID reference to a NSVLD in the NS """ return self.__netslice_vld_id_ref def _set_netslice_vld_id_ref(self, v, load=False): """ Setter method for netslice_vld_id_ref, mapped from YANG variable /nst/netslice_connection_point/netslice_vld_id_ref (leafref) If this variable is read-only (config: false) in the source YANG file, then _set_netslice_vld_id_ref is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_netslice_vld_id_ref() directly. 
YANG Description: ID reference to a NSVLD in the NS """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="netslice-vld-id-ref", parent=self, choice=(u'connection', u'netslice-vld-ref'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='leafref', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """netslice_vld_id_ref must be of a type compatible with leafref""", 'defined-type': "leafref", 'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="netslice-vld-id-ref", parent=self, choice=(u'connection', u'netslice-vld-ref'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='leafref', is_config=True)""", }) self.__netslice_vld_id_ref = t if hasattr(self, '_set'): self._set() def _unset_netslice_vld_id_ref(self): self.__netslice_vld_id_ref = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="netslice-vld-id-ref", parent=self, choice=(u'connection', u'netslice-vld-ref'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='leafref', is_config=True) def _get_nsd_id_ref(self): """ Getter method for nsd_id_ref, mapped from YANG variable /nst/netslice_connection_point/nsd_id_ref (leafref) YANG Description: A reference to a nsd. """ return self.__nsd_id_ref def _set_nsd_id_ref(self, v, load=False): """ Setter method for nsd_id_ref, mapped from YANG variable /nst/netslice_connection_point/nsd_id_ref (leafref) If this variable is read-only (config: false) in the source YANG file, then _set_nsd_id_ref is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_nsd_id_ref() directly. YANG Description: A reference to a nsd. 
""" if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="nsd-id-ref", parent=self, choice=(u'connection', u'nsd-connection-point-ref'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='leafref', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """nsd_id_ref must be of a type compatible with leafref""", 'defined-type': "leafref", 'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="nsd-id-ref", parent=self, choice=(u'connection', u'nsd-connection-point-ref'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='leafref', is_config=True)""", }) self.__nsd_id_ref = t if hasattr(self, '_set'): self._set() def _unset_nsd_id_ref(self): self.__nsd_id_ref = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="nsd-id-ref", parent=self, choice=(u'connection', u'nsd-connection-point-ref'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='leafref', is_config=True) def _get_nsd_connection_point_ref(self): """ Getter method for nsd_connection_point_ref, mapped from YANG variable /nst/netslice_connection_point/nsd_connection_point_ref (leafref) """ return self.__nsd_connection_point_ref def _set_nsd_connection_point_ref(self, v, load=False): """ Setter method for nsd_connection_point_ref, mapped from YANG variable /nst/netslice_connection_point/nsd_connection_point_ref (leafref) If this variable is read-only (config: false) in the source YANG file, then _set_nsd_connection_point_ref is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_nsd_connection_point_ref() directly. 
""" if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="nsd-connection-point-ref", parent=self, choice=(u'connection', u'nsd-connection-point-ref'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='leafref', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """nsd_connection_point_ref must be of a type compatible with leafref""", 'defined-type': "leafref", 'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="nsd-connection-point-ref", parent=self, choice=(u'connection', u'nsd-connection-point-ref'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='leafref', is_config=True)""", }) self.__nsd_connection_point_ref = t if hasattr(self, '_set'): self._set() def _unset_nsd_connection_point_ref(self): self.__nsd_connection_point_ref = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="nsd-connection-point-ref", parent=self, choice=(u'connection', u'nsd-connection-point-ref'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='leafref', is_config=True) name = __builtin__.property(_get_name, _set_name) floating_ip_required = __builtin__.property(_get_floating_ip_required, _set_floating_ip_required) netslice_vld_id_ref = __builtin__.property(_get_netslice_vld_id_ref, _set_netslice_vld_id_ref) nsd_id_ref = __builtin__.property(_get_nsd_id_ref, _set_nsd_id_ref) nsd_connection_point_ref = __builtin__.property(_get_nsd_connection_point_ref, _set_nsd_connection_point_ref) __choices__ = {u'connection': {u'netslice-vld-ref': [u'netslice_vld_id_ref'], u'nsd-connection-point-ref': [u'nsd_id_ref', u'nsd_connection_point_ref']}} _pyangbind_elements = OrderedDict([('name', name), ('floating_ip_required', floating_ip_required), ('netslice_vld_id_ref', netslice_vld_id_ref), ('nsd_id_ref', nsd_id_ref), ('nsd_connection_point_ref', nsd_connection_point_ref), ]) class yc_provider_network_nst__nst_netslice_vld_provider_network(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module nst - based on the path /nst/netslice-vld/provider-network. Each member element of the container is represented as a class variable - with a specific YANG type. YANG Description: Container for the provider network. 
""" __slots__ = ('_path_helper', '_extmethods', '__physical_network','__segmentation_id',) _yang_name = 'provider-network' _pybind_generated_by = 'container' def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__segmentation_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="segmentation_id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='uint32', is_config=True) self.__physical_network = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="physical-network", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path()+[self._yang_name] else: return [u'nst', u'netslice-vld', u'provider-network'] def _get_physical_network(self): """ Getter method for physical_network, mapped from YANG variable /nst/netslice_vld/provider_network/physical_network (string) YANG Description: Name of the physical network on which the provider network is built. """ return self.__physical_network def _set_physical_network(self, v, load=False): """ Setter method for physical_network, mapped from YANG variable /nst/netslice_vld/provider_network/physical_network (string) If this variable is read-only (config: false) in the source YANG file, then _set_physical_network is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_physical_network() directly. YANG Description: Name of the physical network on which the provider network is built. 
""" if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="physical-network", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """physical_network must be of a type compatible with string""", 'defined-type': "string", 'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="physical-network", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)""", }) self.__physical_network = t if hasattr(self, '_set'): self._set() def _unset_physical_network(self): self.__physical_network = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="physical-network", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True) def _get_segmentation_id(self): """ Getter method for segmentation_id, mapped from YANG variable /nst/netslice_vld/provider_network/segmentation_id (uint32) YANG Description: ID of segregated virtual networks """ return self.__segmentation_id def _set_segmentation_id(self, v, load=False): """ Setter method for segmentation_id, mapped from YANG variable /nst/netslice_vld/provider_network/segmentation_id (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_segmentation_id is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_segmentation_id() directly. 
YANG Description: ID of segregated virtual networks """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="segmentation_id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='uint32', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """segmentation_id must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="segmentation_id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='uint32', is_config=True)""", }) self.__segmentation_id = t if hasattr(self, '_set'): self._set() def _unset_segmentation_id(self): self.__segmentation_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="segmentation_id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='uint32', is_config=True) physical_network = __builtin__.property(_get_physical_network, _set_physical_network) segmentation_id = __builtin__.property(_get_segmentation_id, _set_segmentation_id) _pyangbind_elements = OrderedDict([('physical_network', physical_network), ('segmentation_id', segmentation_id), ]) class yc_nss_connection_point_ref_nst__nst_netslice_vld_nss_connection_point_ref(PybindBase):
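# Usage sketch for the generated provider-network container above.  It assumes
# the Python 2-era pyangbind runtime this module was generated against
# (YANGDynClass, RestrictedClassType, PybindBase, `long`, `six`) is importable;
# the class name is taken verbatim from the generated code above.
pn = yc_provider_network_nst__nst_netslice_vld_provider_network()

# The generated property setters validate assignments against the YANG types:
pn.physical_network = "physnet1"   # ok: any string
pn.segmentation_id = 1001          # ok: fits the uint32 range 0..4294967295

try:
    pn.segmentation_id = -5        # rejected by RestrictedClassType's range check
except ValueError as err:
    print(err)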
import numpy as np import matplotlib #matplotlib.use('KtAgg') import matplotlib.pylab as plt import matplotlib.gridspec as gridspec from matplotlib import colors from matplotlib.patches import Circle from matplotlib.figure import * from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas from matplotlib.backends.backend_qt4 import NavigationToolbar2QT as NavigationToolbar import os if os.environ.get('QT_API', None) is None: from PyQt4 import QtGui, QtCore else: from PySide import QtGui, QtCore from astropy import units as u from tardis import analysis, util # def current_ion_index(index, index_list): # if not index in index_list: # return None # if not (index - 1) in index_list: # return 0 # else: # return current_ion_index(index - 1, index_list) + 1 # # def current_ion_index(index, duplicate_list): # if duplicate_list[index - 1] != duplicate_list[index]: # return 0 # else: # return current_ion_index(index - 1, duplicate_list) + 1 class ModelViewer(QtGui.QWidget): def __init__(self, parent=None): # assumes that qt has already been initialized by starting IPython with the flag "--pylab=qt" app = QtCore.QCoreApplication.instance() if app is None: app = QtGui.QApplication([]) try: from IPython.lib.guisupport import start_event_loop_qt4 start_event_loop_qt4(app) except ImportError: app.exec_() super(ModelViewer, self).__init__(parent) self.model = None self.shell_info = {} self.line_info = [] self.setGeometry(20, 35, 1250, 500) self.setWindowTitle('Shells Viewer') self.tablemodel = SimpleTableModel([['Shell: '], ["Rad. temp", "Ws"]], (1, 0)) self.tableview = QtGui.QTableView() self.graph = MatplotlibWidget(self, 'model') self.graph_label = QtGui.QLabel('Select Property:') self.graph_button = QtGui.QToolButton() self.spectrum = MatplotlibWidget(self) self.spectrum_label = QtGui.QLabel('Select Spectrum:') self.spectrum_button = QtGui.QToolButton() self.spectrum_span_button = QtGui.QPushButton('Show Wavelength Range') self.spectrum_line_info_button = QtGui.QPushButton('Show Line Info') self.layout = QtGui.QHBoxLayout() self.graph_sublayout = QtGui.QVBoxLayout() self.graph_subsublayout = QtGui.QHBoxLayout() self.spectrum_sublayout = QtGui.QVBoxLayout() self.spectrum_subsublayout = QtGui.QHBoxLayout() self.tableview.setMinimumWidth(200) self.tableview.connect(self.tableview.verticalHeader(), QtCore.SIGNAL('sectionClicked(int)'), self.graph.highlight_shell) self.tableview.connect(self.tableview.verticalHeader(), QtCore.SIGNAL('sectionDoubleClicked(int)'), self.on_header_double_clicked) self.graph_button.setText('Rad. temp') self.spectrum_button.setText('spec_flux_angstrom') self.graph_button.setPopupMode(QtGui.QToolButton.MenuButtonPopup) self.spectrum_button.setPopupMode(QtGui.QToolButton.MenuButtonPopup) self.graph_button.setMenu(QtGui.QMenu(self.graph_button)) self.spectrum_button.setMenu(QtGui.QMenu(self.spectrum_button)) self.graph_button.menu().addAction('Rad. 
temp').triggered.connect(self.change_graph_to_t_rads) self.graph_button.menu().addAction('Ws').triggered.connect(self.change_graph_to_ws) self.spectrum_button.menu().addAction('spec_flux_angstrom').triggered.connect(self.change_spectrum_to_spec_flux_angstrom) self.spectrum_button.menu().addAction('spec_virtual_flux_angstrom').triggered.connect(self.change_spectrum_to_spec_virtual_flux_angstrom) self.spectrum_span_button.clicked.connect(self.spectrum.show_span) self.spectrum_line_info_button.clicked.connect(self.spectrum.show_line_info) self.layout.addWidget(self.tableview) self.graph_subsublayout.addWidget(self.graph_label) self.graph_subsublayout.addWidget(self.graph_button) self.graph_sublayout.addLayout(self.graph_subsublayout) self.graph_sublayout.addWidget(self.graph) self.layout.addLayout(self.graph_sublayout) self.spectrum_subsublayout.addWidget(self.spectrum_span_button) self.spectrum_subsublayout.addWidget(self.spectrum_label) self.spectrum_subsublayout.addWidget(self.spectrum_button) self.spectrum_sublayout.addLayout(self.spectrum_subsublayout) self.spectrum_sublayout.addWidget(self.spectrum_line_info_button) self.spectrum_sublayout.addWidget(self.spectrum) self.spectrum_sublayout.addWidget(self.spectrum.toolbar) self.layout.addLayout(self.spectrum_sublayout) self.spectrum_line_info_button.hide() self.setLayout(self.layout) def show_model(self, model=None): if model: self.change_model(model) self.tableview.setModel(self.tablemodel) self.plot_model() self.plot_spectrum() self.show() def update_data(self, model=None): if model: self.change_model(model) self.tablemodel.updateTable() for index in self.shell_info.keys(): self.shell_info[index].update_tables() self.plot_model() if self.graph_button.text == 'Ws': self.change_graph_to_ws() self.plot_spectrum() if self.spectrum_button.text == 'spec_virtual_flux_angstrom': self.change_spectrum_to_spec_virtual_flux_angstrom() self.show() def change_model(self, model): self.model = model self.tablemodel.arraydata = [] self.tablemodel.addData(model.t_rads.value.tolist()) self.tablemodel.addData(model.ws.tolist()) def change_spectrum_to_spec_virtual_flux_angstrom(self): if self.model.spectrum_virtual.luminosity_density_lambda is None: luminosity_density_lambda = np.zeros_like(self.model.spectrum_virtual.wavelength) else: luminosity_density_lambda = self.model.spectrum_virtual.luminosity_density_lambda.value self.change_spectrum(luminosity_density_lambda, 'spec_flux_angstrom') def change_spectrum_to_spec_flux_angstrom(self): if self.model.spectrum.luminosity_density_lambda is None: luminosity_density_lambda = np.zeros_like(self.model.spectrum.wavelength) else: luminosity_density_lambda = self.model.spectrum.luminosity_density_lambda.value self.change_spectrum(luminosity_density_lambda, 'spec_flux_angstrom') def change_spectrum(self, data, name): self.spectrum_button.setText(name) self.spectrum.dataplot[0].set_ydata(data) self.spectrum.ax.relim() self.spectrum.ax.autoscale() self.spectrum.draw() def plot_spectrum(self): self.spectrum.ax.clear() self.spectrum.ax.set_title('Spectrum') self.spectrum.ax.set_xlabel('Wavelength (A)') self.spectrum.ax.set_ylabel('Intensity') wavelength = self.model.spectrum.wavelength.value if self.model.spectrum.luminosity_density_lambda is None: luminosity_density_lambda = np.zeros_like(wavelength) else: luminosity_density_lambda = self.model.spectrum.luminosity_density_lambda.value self.spectrum.dataplot = self.spectrum.ax.plot(wavelength, luminosity_density_lambda, label='b') self.spectrum.draw() def 
change_graph_to_ws(self): self.change_graph(self.model.ws, 'Ws', '') def change_graph_to_t_rads(self): self.change_graph(self.model.t_rads.value, 't_rads', '(K)') def change_graph(self, data, name, unit): self.graph_button.setText(name) self.graph.dataplot[0].set_ydata(data) self.graph.ax1.relim() self.graph.ax1.autoscale() self.graph.ax1.set_title(name + ' vs Shell') self.graph.ax1.set_ylabel(name + ' ' + unit) normalizer = colors.Normalize(vmin=data.min(), vmax=data.max()) color_map = plt.cm.ScalarMappable(norm=normalizer, cmap=plt.cm.jet) color_map.set_array(data) self.graph.cb.set_clim(vmin=data.min(), vmax=data.max()) self.graph.cb.update_normal(color_map) if unit == '(K)': unit = 'T (K)' self.graph.cb.set_label(unit) for i, item in enumerate(data): self.shells[i].set_facecolor(color_map.to_rgba(item)) self.graph.draw() def plot_model(self): self.graph.ax1.clear() self.graph.ax1.set_title('Rad. Temp vs Shell') self.graph.ax1.set_xlabel('Shell Number') self.graph.ax1.set_ylabel('Rad. Temp (K)') self.graph.ax1.yaxis.get_major_formatter().set_powerlimits((0, 1)) self.graph.dataplot = self.graph.ax1.plot(range(len(self.model.t_rads.value)), self.model.t_rads.value) self.graph.ax2.clear() self.graph.ax2.set_title('Shell View') self.graph.ax2.set_xlabel('Arbitrary') self.graph.ax2.set_ylabel('Arbitrary') self.shells = [] t_rad_normalizer = colors.Normalize(vmin=self.model.t_rads.value.min(), vmax=self.model.t_rads.value.max()) t_rad_color_map = plt.cm.ScalarMappable(norm=t_rad_normalizer, cmap=plt.cm.jet) t_rad_color_map.set_array(self.model.t_rads.value) if self.graph.cb: self.graph.cb.set_clim(vmin=self.model.t_rads.value.min(), vmax=self.model.t_rads.value.max()) self.graph.cb.update_normal(t_rad_color_map) else: self.graph.cb = self.graph.figure.colorbar(t_rad_color_map) self.graph.cb.set_label('T (K)') self.graph.normalizing_factor = 0.2 * (self.model.tardis_config.structure.r_outer.value[-1] - self.model.tardis_config.structure.r_inner.value[0]) / self.model.tardis_config.structure.r_inner.value[0] #self.graph.normalizing_factor = 8e-16 for i, t_rad in enumerate(self.model.t_rads.value): r_inner = self.model.tardis_config.structure.r_inner.value[i] * self.graph.normalizing_factor r_outer = self.model.tardis_config.structure.r_outer.value[i] * self.graph.normalizing_factor self.shells.append(Shell(i, (0,0), r_inner, r_outer, facecolor=t_rad_color_map.to_rgba(t_rad), picker=self.graph.shell_picker)) self.graph.ax2.add_patch(self.shells[i]) self.graph.ax2.set_xlim(0, self.model.tardis_config.structure.r_outer.value[-1] * self.graph.normalizing_factor) self.graph.ax2.set_ylim(0, self.model.tardis_config.structure.r_outer.value[-1] * self.graph.normalizing_factor) self.graph.figure.tight_layout() self.graph.draw() def on_header_double_clicked(self, index): self.shell_info[index] = ShellInfo(index, self) class ShellInfo(QtGui.QDialog): def __init__(self, index, parent=None): super(ShellInfo, self).__init__(parent) self.parent = parent self.shell_index = index self.setGeometry(400, 150, 200, 400) self.setWindowTitle('Shell %d Abundances' % (self.shell_index + 1)) self.atomstable = QtGui.QTableView() self.ionstable = QtGui.QTableView() self.levelstable = QtGui.QTableView() self.atomstable.connect(self.atomstable.verticalHeader(), QtCore.SIGNAL('sectionClicked(int)'), self.on_atom_header_double_clicked) self.table1_data = self.parent.model.tardis_config.abundances[self.shell_index] self.atomsdata = SimpleTableModel([['Z = '], ['Count (Shell %d)' % (self.shell_index + 1)]], iterate_header=(2, 
0), index_info=self.table1_data.index.values.tolist()) self.ionsdata = None self.levelsdata = None self.atomsdata.addData(self.table1_data.values.tolist()) self.atomstable.setModel(self.atomsdata) self.layout = QtGui.QHBoxLayout() self.layout.addWidget(self.atomstable) self.layout.addWidget(self.ionstable) self.layout.addWidget(self.levelstable) self.setLayout(self.layout) self.ionstable.hide() self.levelstable.hide() self.show() def on_atom_header_double_clicked(self, index): self.current_atom_index = self.table1_data.index.values.tolist()[index] self.table2_data = self.parent.model.plasma_array.ion_populations[self.shell_index].ix[self.current_atom_index] self.ionsdata = SimpleTableModel([['Ion: '], ['Count (Z = %d)' % self.current_atom_index]], iterate_header=(2, 0), index_info=self.table2_data.index.values.tolist()) normalized_data = [] for item in self.table2_data.values: normalized_data.append(float(item / self.parent.model.tardis_config.number_densities[self.shell_index] .ix[self.current_atom_index])) self.ionsdata.addData(normalized_data) self.ionstable.setModel(self.ionsdata) self.ionstable.connect(self.ionstable.verticalHeader(), QtCore.SIGNAL('sectionClicked(int)'), self.on_ion_header_double_clicked) self.levelstable.hide() self.ionstable.setColumnWidth(0, 120) self.ionstable.show() self.setGeometry(400, 150, 380, 400) self.show() def on_ion_header_double_clicked(self, index): self.current_ion_index = self.table2_data.index.values.tolist()[index] self.table3_data = self.parent.model.plasma_array.level_populations[self.shell_index].ix[self.current_atom_index, self.current_ion_index] self.levelsdata = SimpleTableModel([['Level: '], ['Count (Ion %d)' % self.current_ion_index]], iterate_header=(2, 0), index_info=self.table3_data.index.values.tolist()) normalized_data = [] for item in self.table3_data.values.tolist(): normalized_data.append(float(item / self.table2_data.ix[self.current_ion_index])) self.levelsdata.addData(normalized_data) self.levelstable.setModel(self.levelsdata) self.levelstable.setColumnWidth(0, 120) self.levelstable.show() self.setGeometry(400, 150, 580, 400) self.show() def update_tables(self): self.table1_data = self.parent.model.plasma_array[self.shell_index].number_densities self.atomsdata.index_info=self.table1_data.index.values.tolist() self.atomsdata.arraydata = [] self.atomsdata.addData(self.table1_data.values.tolist()) self.atomsdata.updateTable() self.ionstable.hide() self.levelstable.hide() self.setGeometry(400, 150, 200, 400) self.show() class LineInteractionTables(QtGui.QWidget): def __init__(self, line_interaction_analysis, atom_data, description): super(LineInteractionTables, self).__init__() self.text_description = QtGui.QLabel(str(description)) self.species_table = QtGui.QTableView() self.transitions_table = QtGui.QTableView() self.layout = QtGui.QHBoxLayout() self.line_interaction_analysis = line_interaction_analysis self.atom_data = atom_data line_interaction_species_group = line_interaction_analysis.last_line_in.groupby(['atomic_number', 'ion_number']) self.species_selected = sorted(line_interaction_species_group.groups.keys()) species_symbols = [util.species_tuple_to_string(item, atom_data) for item in self.species_selected] species_table_model = SimpleTableModel([species_symbols, ['Species']]) species_abundances = (line_interaction_species_group.wavelength.count().astype(float) / line_interaction_analysis.last_line_in.wavelength.count()).astype(float).tolist() species_abundances = map(float, species_abundances) 
species_table_model.addData(species_abundances) self.species_table.setModel(species_table_model) line_interaction_species_group.wavelength.count() self.layout.addWidget(self.text_description) self.layout.addWidget(self.species_table) self.species_table.connect(self.species_table.verticalHeader(), QtCore.SIGNAL('sectionClicked(int)'), self.on_species_clicked) self.layout.addWidget(self.transitions_table) self.setLayout(self.layout) self.show() def on_species_clicked(self, index): current_species = self.species_selected[index] last_line_in = self.line_interaction_analysis.last_line_in last_line_out = self.line_interaction_analysis.last_line_out last_line_in_filter = (last_line_in.atomic_number == current_species[0]).values & \ (last_line_in.ion_number == current_species[1]).values current_last_line_in = last_line_in[last_line_in_filter].reset_index() current_last_line_out = last_line_out[last_line_in_filter].reset_index() current_last_line_in['line_id_out'] = current_last_line_out['line_id'] last_line_in_string = [] last_line_count = [] grouped_line_interactions = current_last_line_in.groupby(['line_id', 'line_id_out']) exc_deexc_string = 'exc. %d-%d (%.2f A) de-exc. %d-%d (%.2f A)' for line_id, row in grouped_line_interactions.wavelength.count().iteritems(): current_line_in = self.atom_data.lines.ix[line_id[0]] current_line_out = self.atom_data.lines.ix[line_id[1]] last_line_in_string.append(exc_deexc_string % (current_line_in['level_number_lower'], current_line_in['level_number_upper'], current_line_in['wavelength'], current_line_out['level_number_upper'], current_line_out['level_number_lower'], current_line_out['wavelength'])) last_line_count.append(int(row)) last_line_in_model = SimpleTableModel([last_line_in_string, ['Num. pkts %d' % current_last_line_in.wavelength.count()]]) last_line_in_model.addData(last_line_count) self.transitions_table.setModel(last_line_in_model) class LineInfo(QtGui.QDialog): def __init__(self, parent, wavelength_start, wavelength_end): super(LineInfo, self).__init__(parent) self.parent = parent self.setGeometry(180 + len(self.parent.line_info) * 20, 150, 250, 400) self.setWindowTitle('Line Interaction: %.2f - %.2f (A) ' % (wavelength_start, wavelength_end, )) self.layout = QtGui.QVBoxLayout() packet_nu_line_interaction = analysis.LastLineInteraction.from_model(self.parent.model) packet_nu_line_interaction.packet_filter_mode = 'packet_nu' packet_nu_line_interaction.wavelength_start = wavelength_start * u.angstrom packet_nu_line_interaction.wavelength_end = wavelength_end * u.angstrom line_in_nu_line_interaction = analysis.LastLineInteraction.from_model(self.parent.model) line_in_nu_line_interaction.packet_filter_mode = 'line_in_nu' line_in_nu_line_interaction.wavelength_start = wavelength_start * u.angstrom line_in_nu_line_interaction.wavelength_end = wavelength_end * u.angstrom self.layout.addWidget(LineInteractionTables(packet_nu_line_interaction, self.parent.model.atom_data, 'filtered by frequency of packet')) self.layout.addWidget(LineInteractionTables(line_in_nu_line_interaction, self.parent.model.atom_data, 'filtered by frequency of line interaction')) self.setLayout(self.layout) self.show() def get_data(self, wavelength_start, wavelength_end): self.wavelength_start = wavelength_start * u.angstrom self.wavelength_end = wavelength_end * u.angstrom last_line_in_ids, last_line_out_ids = analysis.get_last_line_interaction(self.wavelength_start, self.wavelength_end, self.parent.model) self.last_line_in, self.last_line_out = 
self.parent.model.atom_data.lines.ix[last_line_in_ids], self.parent.model.atom_data.lines.ix[last_line_out_ids] self.grouped_lines_in, self.grouped_lines_out = self.last_line_in.groupby(['atomic_number', 'ion_number']), self.last_line_out.groupby(['atomic_number', 'ion_number']) self.ions_in, self.ions_out = self.grouped_lines_in.groups.keys(), self.grouped_lines_out.groups.keys() self.ions_in.sort() self.ions_out.sort() self.header_list = [] self.ion_table = (self.grouped_lines_in.wavelength.count().astype(float) / self.grouped_lines_in.wavelength.count().sum()).values.tolist() for z, ion in self.ions_in: self.header_list.append('Z = %d: Ion %d' % (z, ion)) def get_transition_table(self, lines, atom, ion): grouped = lines.groupby(['atomic_number', 'ion_number']) transitions_with_duplicates = lines.ix[grouped.groups[(atom, ion)]].groupby(['level_number_lower', 'level_number_upper']).groups transitions = lines.ix[grouped.groups[(atom, ion)]].drop_duplicates().groupby(['level_number_lower', 'level_number_upper']).groups transitions_count = [] transitions_parsed = [] for item in transitions.values(): c = 0 for ditem in transitions_with_duplicates.values(): c += ditem.count(item[0]) transitions_count.append(c) s = 0 for item in transitions_count: s += item for index in range(len(transitions_count)): transitions_count[index] /= float(s) for key, value in transitions.items(): transitions_parsed.append("%d-%d (%.2f A)" % (key[0], key[1], self.parent.model.atom_data.lines.ix[value[0]]['wavelength'])) return transitions_parsed, transitions_count def on_atom_clicked(self, index): self.transitionsin_parsed, self.transitionsin_count = self.get_transition_table(self.last_line_in, self.ions_in[index][0], self.ions_in[index][1]) self.transitionsout_parsed, self.transitionsout_count = self.get_transition_table(self.last_line_out, self.ions_out[index][0], self.ions_out[index][1]) self.transitionsindata = SimpleTableModel([self.transitionsin_parsed, ['Lines In']]) self.transitionsoutdata = SimpleTableModel([self.transitionsout_parsed, ['Lines Out']]) self.transitionsindata.addData(self.transitionsin_count) self.transitionsoutdata.addData(self.transitionsout_count) self.transitionsintable.setModel(self.transitionsindata) self.transitionsouttable.setModel(self.transitionsoutdata) self.transitionsintable.show() self.transitionsouttable.show() self.setGeometry(180 + len(self.parent.line_info) * 20, 150, 750, 400) self.show() def on_atom_clicked2(self, index): self.transitionsin_parsed, self.transitionsin_count = self.get_transition_table(self.last_line_in, self.ions_in[index][0], self.ions_in[index][1]) self.transitionsout_parsed, self.transitionsout_count = self.get_transition_table(self.last_line_out, self.ions_out[index][0], self.ions_out[index][1]) self.transitionsindata = SimpleTableModel([self.transitionsin_parsed, ['Lines In']]) self.transitionsoutdata = SimpleTableModel([self.transitionsout_parsed, ['Lines Out']]) self.transitionsindata.addData(self.transitionsin_count) self.transitionsoutdata.addData(self.transitionsout_count) self.transitionsintable2.setModel(self.transitionsindata) self.transitionsouttable2.setModel(self.transitionsoutdata) self.transitionsintable2.show() self.transitionsouttable2.show() self.setGeometry(180 + len(self.parent.line_info) * 20, 150, 750, 400) self.show() class SimpleTableModel(QtCore.QAbstractTableModel): def __init__(self, headerdata=None, iterate_header=(0, 0), index_info=None, parent=None, *args): super(SimpleTableModel, self).__init__(parent, *args) self.headerdata 
= headerdata
        self.arraydata = []
        self.iterate_header = iterate_header
        self.index_info = index_info

    def addData(self, datain):
        self.arraydata.append(datain)

    def rowCount(self, parent=QtCore.QModelIndex()):
        return len(self.arraydata[0])

    def columnCount(self, parent=QtCore.QModelIndex()):
        return len(self.arraydata)

    def headerData(self, section, orientation, role=QtCore.Qt.DisplayRole):
        if orientation == QtCore.Qt.Vertical and role == QtCore.Qt.DisplayRole:
            if self.iterate_header[0] == 1:
                return self.headerdata[0][0] + str(section + 1)
            elif self.iterate_header[0] == 2:
                if self.index_info:
                    return self.headerdata[0][0] + str(self.index_info[section])
                else:
                    return self.headerdata[0][0] + str(section + 1)
            else:
                return self.headerdata[0][section]
        elif orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:
            if self.iterate_header[1] == 1:
                return self.headerdata[1][0] + str(section + 1)
            elif self.iterate_header[1] == 2:
                if self.index_info:
                    return self.headerdata[1][0] + str(self.index_info[section])
            else:
                return self.headerdata[1][section]
        return ""

    def data(self, index, role=QtCore.Qt.DisplayRole):
        if not index.isValid():
            return None
        elif role != QtCore.Qt.DisplayRole:
            return None
        return self.arraydata[index.column()][index.row()]

    def setData(self, index, value, role=QtCore.Qt.EditRole):
        if not index.isValid():
            return False
        elif
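The SimpleTableModel above follows Qt's standard model/view pattern: data lives in a plain list of columns and is exposed through rowCount, columnCount, data, and headerData. Below is a minimal, self-contained sketch of that column-oriented idea, written against PyQt5 (the GUI above targets the older QtGui namespace), so the import paths, the ColumnTableModel name, and the sample values are assumptions for illustration rather than TARDIS code.

import sys
from PyQt5 import QtCore, QtWidgets


class ColumnTableModel(QtCore.QAbstractTableModel):
    """Read-only model whose arraydata is a list of columns, mirroring SimpleTableModel."""

    def __init__(self, headers, parent=None):
        super().__init__(parent)
        self.headers = headers          # one label per column
        self.arraydata = []             # list of columns, each a list of cell values

    def add_column(self, column):
        self.arraydata.append(list(column))

    def rowCount(self, parent=QtCore.QModelIndex()):
        return len(self.arraydata[0]) if self.arraydata else 0

    def columnCount(self, parent=QtCore.QModelIndex()):
        return len(self.arraydata)

    def headerData(self, section, orientation, role=QtCore.Qt.DisplayRole):
        if role != QtCore.Qt.DisplayRole:
            return None
        if orientation == QtCore.Qt.Horizontal:
            return self.headers[section]
        return str(section + 1)         # 1-based row/shell number, as in the GUI above

    def data(self, index, role=QtCore.Qt.DisplayRole):
        if not index.isValid() or role != QtCore.Qt.DisplayRole:
            return None
        return str(self.arraydata[index.column()][index.row()])


if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    model = ColumnTableModel(["t_rad (K)", "W"])
    model.add_column([9967.0, 9893.0, 9820.0])   # made-up values, for illustration only
    model.add_column([0.40, 0.35, 0.31])
    view = QtWidgets.QTableView()
    view.setModel(model)
    view.show()
    sys.exit(app.exec_())

Storing each quantity as its own column keeps updates cheap: refreshing the view after a simulation step only requires replacing the column lists and emitting a layout/data change, which is essentially what updateTable does above.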
from tkinter import * #imports the tkinter module from tkinter import ttk #imports the ttk submodule in tkinter from tkinter import messagebox #imports the messagebox submodule in tkinter from random import choice, randint #imports the choice and randint functions from the random module #creates the Questionnaire (confidence) class. class QuestionApp(Frame): #initiates the class's initial properties def __init__(self, master, filename): super(QuestionApp, self).__init__(master) self.master.protocol("WM_DELETE_WINDOW", self.confirmclosure) self.master.iconbitmap('aclogo.ico') self.count = 0 self.countmax = 10 self.filename = filename self.opener = open(filename,"r") self.statements = list(self.opener) self.attempts = [] self.randlist = [] self.resultlab = [] self.pack() self.app() #confirms whether the program will be closed when the top-right close button is clicked def confirmclosure(self): if messagebox.askokcancel("Quitting the program","Are you sure you want to close this test? Data entered will NEITHER be saved NOR be considered in the final assessment results!"): self.master.destroy() #initialises the questionnaire window def app(self): self.lab1 = Label(self, text="Answer the questions using the buttons provided") self.lab1.pack() self.lab2 = Label(self, text="Questions asked: " + str(self.count) + " out of " + str(self.countmax)) self.lab2.pack() self.nacks = Label(self, text=self.choico()) self.nacks.pack() self.but1 = Button(self, text="Always Agree") self.but2 = Button(self, text="Agree") self.but3 = Button(self, text="I'm not exactly sure") self.but4 = Button(self, text="Disagree") self.but5 = Button(self, text="Always Disagree") self.but1.configure(command=lambda: self.buttons(self.but1)) self.but2.configure(command=lambda: self.buttons(self.but2)) self.but3.configure(command=lambda: self.buttons(self.but3)) self.but4.configure(command=lambda: self.buttons(self.but4)) self.but5.configure(command=lambda: self.buttons(self.but5)) for i in [self.but1, self.but2, self.but3, self.but4, self.but5]: i.pack() #selects a random statement from the file def choico(self): randst = choice(self.statements).strip() while randst in self.randlist: randst = choice(self.statements).strip() self.randlist.append(randst) return randst #creates an exit button if the limit is reached def exitbutton(self): if self.count >= self.countmax: self.end() self.but6 = Button(self, text="Exit", command=self.kill) self.but6.pack() #adds the selected response to the attempts dictionary to be added to the final results in a different form def buttons(self,botun): if self.count < self.countmax: self.count += 1 self.lab2['text'] = "Questions asked: " + str(self.count) + " out of " + str(self.countmax) self.dicto = dict([('Number',self.count),('Question',self.nacks['text']),('Response',botun['text'])]) self.attempts.append(self.dicto) self.nacks['text'] = self.choico() self.exitbutton() #closes the window (function reserved for the exit button at the end) def kill(self): self.master.destroy() #adds the details in the attempts array to a stringed form in resultlab def end(self): for i in range(len(self.attempts)): self.resultlab.append("Question {0}: {1}\nYou responded: {2}\n".format(self.attempts[i]['Number'],self.attempts[i]['Question'],self.attempts[i]['Response'])) #adapts the data in resultlab into a string to be added to the results at the end of the whole assessment, in the form of a text file def file(self): self.endreturn = "" for i in self.resultlab: self.endreturn += i return self.endreturn #creates the 
organisation class class OrganisationApp(Frame): #initiates the class's initial properties def __init__(self,master): super(OrganisationApp, self).__init__(master) self.master.protocol("WM_DELETE_WINDOW", self.confirmclosure) self.master.iconbitmap('aclogo.ico') self.resultlab = [] self.select = randint(1,3) self.pack() if self.select == 1: self.FireDrill() elif self.select == 2: self.PlanDay() elif self.select == 3: self.Revise() #confirms whether the program will be closed when the top-right close button is clicked def confirmclosure(self): if messagebox.askokcancel("Quitting the program","Are you sure you want to close this test? Data entered will NEITHER be saved NOR be considered in the final assessment results!"): self.master.destroy() #confirms whether the user wants to send this data through and end the test def confirm(self): confirmation = messagebox.askquestion("Confirm responses","Are you sure you want to send this data through?",icon="question") if confirmation == "yes": self.retreive() else: None #adapts the data in resultlab into a string to be added to the results at the end of the whole assessment, in the form of a text file def file(self): self.endreturn = "" for i in self.resultlab: self.endreturn += i return self.endreturn #retreives the answers from the drop-down menus and then appends them to resultlab #through a specific function that depends on which topic was randomly selected def retreive(self): self.ans1 = self.dr1store.get() self.ans2 = self.dr2store.get() self.ans3 = self.dr3store.get() self.correctans = 0 if self.select == 1: self.fireanswer() elif self.select == 2: self.plananswer() elif self.select == 3: self.reviseanswer() #loads the "fire drill" questionnaire if it's randomly selected def FireDrill(self): self.labtitle = Label(self,text="Topic for this test: The Fire Drill") self.labtitle.pack() self.q1 = Label(self,text="The Fire Alarm has just gone off. 
What do you do?") self.q1.pack() self.ac = ["Get Coats and Bags","Look for Friends","Head to Central Meeting Point","Line up in groups","Leave building immediately"] self.dr1store = StringVar(self) self.dr1store.set("Select one") self.dr2store = StringVar(self) self.dr2store.set("Select one") self.dr3store = StringVar(self) self.dr3store.set("Select one") for i in (self.dr1store,self.dr2store,self.dr3store): OptionMenu(self,i,*self.ac).pack() self.botexit = Button(self,text="Submit",command=self.confirm) self.botexit.pack() #loads the "planning day" questionnaire if it's randomly selected def PlanDay(self): self.labtitle = Label(self,text="Topic for this test: Planning in the night for the day ahead") self.labtitle.pack() self.q1 = Label(self,text="Before you go to bed, what should you do before planning for the next day?") self.q1.pack() self.ac = ["Pack all your bags the night before", "Check timetable to ensure you have packed the correct books", "Go to bed very late", "Talk to a friend late in the night", "Pack your pencil case and put it in the bag"] self.dr1store = StringVar(self) self.dr1store.set("Select one") self.dr2store = StringVar(self) self.dr2store.set("Select one") self.dr3store = StringVar(self) self.dr3store.set("Select one") for i in (self.dr1store,self.dr2store,self.dr3store): OptionMenu(self,i,*self.ac).pack() self.botexit = Button(self,text="Submit",command=self.confirm) self.botexit.pack() #loads the "planning revision" questionnaire if it's randomly selected def Revise(self): self.labtitle = Label(self,text="Topic for this test: What to do when you REVISE") self.labtitle.pack() self.q1 = Label(self,text="What do you need to do when you revise?") self.q1.pack() self.ac = ["Allocate a set amount of time for revision", "Take Regular Breaks", "Take very very very long sleeps", "Watch TV for no justifiable reason", "Make a revision timetable"] self.dr1store = StringVar(self) self.dr1store.set("Select one") self.dr2store = StringVar(self) self.dr2store.set("Select one") self.dr3store = StringVar(self) self.dr3store.set("Select one") for i in (self.dr1store,self.dr2store,self.dr3store): OptionMenu(self,i,*self.ac).pack() self.botexit = Button(self,text="Submit",command=self.confirm) self.botexit.pack() #checks for the right answers in the right order, and adds all results to resultlab, for the "fire drill" questionnaire def fireanswer(self): if self.ans1 == "Leave building immediately": self.correctans += 1 if self.ans2 == "Head to Central Meeting Point": self.correctans += 1 if self.ans3 == "Line up in groups": self.correctans += 1 if self.correctans == 3: self.stane = "Your order and selections were fully correct" else: self.stane = "Your order and selections were not fully correct" self.resultlab.append("""{0} Question: {1} Your responses: {2}, {3}, {4} Correct responses: Leave building immediately, Head to Central Meeting Point, Line up in groups Correct ordered responses: {5} out of 3 {6}""".format(self.labtitle['text'],self.q1['text'],self.ans1,self.ans2,self.ans3,self.correctans,self.stane)) self.master.destroy() #checks for the right answers in the right order, and adds all results to resultlab, for the "planning day" questionnaire def plananswer(self): if self.ans1 == "Pack your pencil case and put it in the bag": self.correctans += 1 if self.ans2 == "Check timetable to ensure you have packed the correct books": self.correctans += 1 if self.ans3 == "Pack all your bags the night before": self.correctans += 1 if self.correctans == 3: self.stane = "Your order and 
selections were fully correct" else: self.stane = "Your order and selections were not fully correct" self.resultlab.append("""{0} Question: {1} Your responses: {2}, {3}, {4} Correct responses: Pack your pencil case and put it in the bag, Check timetable to ensure you have packed the correct books, Pack all your bags the night before Correct ordered responses: {5} out of 3 {6}""".format(self.labtitle['text'],self.q1['text'],self.ans1,self.ans2,self.ans3,self.correctans,self.stane)) self.master.destroy() #checks for the right answers in the right order, and adds all results to resultlab, for the "planning revision" questionnaire def reviseanswer(self): if self.ans1 == "Make a revision timetable": self.correctans += 1 if self.ans2 == "Allocate a set amount of time for revision": self.correctans += 1 if self.ans3 == "Take Regular Breaks": self.correctans += 1 if self.correctans == 3: self.stane = "Your order and selections were fully correct" else: self.stane = "Your order and selections were not fully correct" self.resultlab.append("""{0} Question: {1} Your responses: {2}, {3}, {4} Correct responses: Make a revision timetable, Allocate a set amount of time for revision, Take Regular Breaks Correct ordered responses: {5} out of 3 {6}""".format(self.labtitle['text'],self.q1['text'],self.ans1,self.ans2,self.ans3,self.correctans,self.stane)) self.master.destroy() #creates the motivation class class MotivApp(Frame): #initiates the class's initial properties def __init__(self,master): super(MotivApp, self).__init__(master) self.master.protocol("WM_DELETE_WINDOW", self.confirmclosure) self.master.iconbitmap('aclogo.ico') self.attempts = [] self.resultlab = [] self.pack() self.app() #confirms whether the program will be closed when the top-right close button is clicked def confirmclosure(self): if messagebox.askokcancel("Quitting the program","Are you sure you want to close this test? Data entered will NEITHER be saved NOR be considered
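The OrganisationApp questionnaires above all rely on the same tkinter idiom: each drop-down is an OptionMenu bound to a StringVar initialised to a placeholder, and the submit handler reads the selections back with .get(). A minimal, runnable sketch of just that idiom follows; the window contents, option strings, and handler names are illustrative, not taken from the assessment files.

import tkinter as tk
from tkinter import messagebox


def submit():
    # Read every drop-down's current selection back from its StringVar.
    answers = [var.get() for var in variables]
    if "Select one" in answers:
        messagebox.showwarning("Incomplete", "Please answer every drop-down first.")
        return
    messagebox.showinfo("Responses", ", ".join(answers))


root = tk.Tk()
options = ["Option A", "Option B", "Option C"]
variables = []
for _ in range(3):
    var = tk.StringVar(root)
    var.set("Select one")                      # placeholder shown before a choice is made
    tk.OptionMenu(root, var, *options).pack()  # OptionMenu stays in sync with the StringVar
    variables.append(var)
tk.Button(root, text="Submit", command=submit).pack()
root.mainloop()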
from __future__ import absolute_import, division, unicode_literals import re import csv from rasa_sdk import Action from sample_data import * from rasa_sdk.forms import FormAction from rasa.core.policies.policy import confidence_scores_for from rasa_sdk.events import SlotSet, ConversationPaused, ReminderScheduled, ConversationResumed import random from datetime import datetime, timedelta, date from default_configurations import * ########################## # GENERAL ########################### # Intent: greetings.hello class HelloAndGreetings(Action): def name(self): return 'action_hello_user' def run(self, dispatcher, tracker, domain): try: mobile_number = tracker.get_slot("mobile_number") if mobile_number and mobile_number.startswith('034'): mobile_number = mobile_number.replace('034', '9234') authenticated_user_response_templates = ["Hi {user_name}, welcome to telenor customer care!", "Hey {user_name}, how can i help you?"] user_name = "" if mobile_number in user_data.keys(): user_name = single_user_object['name'] response = random.choice(authenticated_user_response_templates) dispatcher.utter_message(response.format(user_name=user_name)) # image_url = "http://crm2.univ-lorraine.fr/pages_perso/Aubert/FTenglish/conv2d/hello.jpg" # image_url = "./assets/telenor_pakistan.png" # image_url = "https://i.ibb.co/qMFfqnF/telenor-pakistan.png" # message = {"image_url": image_url} # dispatcher.utter_custom_json(message) except Exception as e: dispatcher.utter_message("Sorry, Something went wrong. Apna swal dobara enter krain") print("\nERROR: Something went wrong with HelloAndGreetings: {}".format(e)) return None class ActionIntermediateFallback(Action): def name(self): return "action_default_ask_affirmation" def __init__(self): self.intent_mappings = {} # read the mapping and store it in a dictionary with open("intent_csv_mapping.txt", "r") as file: lines = file.read().split("\n") for line in lines: splits = line.split(":") if len(splits) > 1: self.intent_mappings[splits[0]] = splits[0] def run(self, dispatcher, tracker, domain): last_intent = tracker.latest_message['intent']['name'] last_intent_confidence = tracker.events[-1]['parse_data']['intent']['confidence'] if last_intent_confidence < 0.3 or last_intent in [None, 'out_of_scope']: response = "Please rephrase your question!" dispatcher.utter_message(response) # todo: status of consecuive_fallback slot? 
else: # sorted(tracker.latest_message['intent_ranking'], key=lambda x: x['confidence'], reverse=True)[0] message = "Did you mean '{}'?".format(last_intent) buttons = [{'title': 'Yes', 'payload': '/{}'.format(last_intent)}, {'title': 'No', 'payload': '/out_of_scope'}] dispatcher.utter_button_message(message, buttons=buttons) # dispatcher.utter_message(message) return [] # Intent: CustomFallBackAction def get_action_names(): with open("domain.yml", "r") as file: text = file.read() return [action.replace("- ", "") for action in text.split("actions:")[1].split(":")[0].split("\n")[:-1] if action] class CustomFallback(Action): def name(self): return 'action_custom_fallback' def run(self, dispatcher, tracker, domain): try: consecutive_fallback = tracker.get_slot("number_of_fallbacks") consecutive_fallback += 1.0 previous_actions = [event['name'] for event in tracker.events if 'name' in event and event['name'] in get_action_names()] response = default_fallback_response if previous_actions[-1] == "action_custom_fallback": if consecutive_fallback >= 2.0: # todo: this below mentioned response is being used in "api.get_server_response" & in LiveZilla UI print("Consecutive Fallbacks: ", consecutive_fallback) response = "Sorry, I can't understand your question. Please wait, your query is being transferred to relevant expert..." consecutive_fallback = 0.0 dispatcher.utter_message(response) return [SlotSet("number_of_fallbacks", consecutive_fallback)] else: # not consecutive fallback consecutive_fallback = 0.0 dispatcher.utter_message(response) return [SlotSet("number_of_fallbacks", consecutive_fallback)] except Exception as e: dispatcher.utter_message("Sorry, Something went wrong. Apna swal dobara enter krain") print("\nERROR: Something went wrong with CustomFallback: {}".format(e)) return None # Intent: offers.info class OffersInformation(Action): def name(self): return 'action_offers_info' def run(self, dispatcher, tracker, domain): try: response = "here are available offers:" dispatcher.utter_message(response) offer_image_url = "https://i.ibb.co/BLYpFSB/whatsapp-offer-256x256.png" offer_details_url = "https://i.ibb.co/dm74DPP/whatsapp-offer-details-256x256.png" dispatcher.utter_custom_json({"image_url": offer_image_url}) dispatcher.utter_custom_json({"image_url": offer_details_url}) dispatcher.utter_message("Reply 'YES' to subscribe this offer.") except Exception as e: dispatcher.utter_message("Sorry, Something went wrong. Apna swal dobara enter krain") print("\nERROR: Something went wrong with OffersInformation: {}".format(e)) return None # Intent: offers.info -- subscribe class OfferSubscribe(Action): def name(self): return 'action_offers_subscribe' def run(self, dispatcher, tracker, domain): try: verification_status = tracker.get_slot("verification") if verification_status == "yes": response = "You have successfully availed this offer." dispatcher.utter_message(response) except Exception as e: dispatcher.utter_message("Sorry, Something went wrong. Apna swal dobara enter krain") print("\nERROR: Something went wrong with OfferSubscribe: {}".format(e)) return None ########################## # USER VERIFICATION ########################### # Intent: authenticate user ! 
class ActionVerificationForm(FormAction): def name(self): return "action_verification_required" @staticmethod def required_slots(tracker): return ["mobile_number", "pin"] def slot_mappings(self): return { "mobile_number": self.from_text(intent=None), "pin": self.from_text(intent=None) } def submit(self, dispatcher, tracker, domain): # get first match from input text mobile_number = re.search(r'(((034)|(9234))[0-9][-_. ]*[0-9]*)', tracker.get_slot("mobile_number")) pin = re.search(r'[0-9]*', tracker.get_slot("pin")) if mobile_number and pin: pin = pin.group(0) mobile_number = re.sub(r'[-_.+]', '', mobile_number.group(0)) # mobile_number = re.sub(r'^034', '9234', mobile_number) # standardize ot 9234x....... if mobile_number and mobile_number.startswith('034'): mobile_number = mobile_number.replace('034', '9234') if mobile_number in user_data.keys() and len(pin) == 4: dispatcher.utter_message("\nHi {}, You are authenticated against mobile_number:{} successfully!".format( user_data[mobile_number]['name'], mobile_number)) return [SlotSet('verification', "yes"), SlotSet('mobile_number', mobile_number), SlotSet('pin', pin)] elif mobile_number not in user_data.keys(): dispatcher.utter_message("Sorry, No record found against this number:{}".format(mobile_number)) elif len(pin) != 4: dispatcher.utter_message("Invalid PIN") # todo: this response is being used in 'chat_endpoint.get_response_from_chatbot', be cautious! dispatcher.utter_message("Authentication Failed. Repeat your query & enter correct credentials!") return [SlotSet('verification', "no")] ########################## # BALANCE ########################### # Intent: balance_info.info class BalanceInformation(Action): def name(self): return 'action_balance_info' # [event['parse_data']['entities'] for event in tracker.events if 'parse_data' in event.keys()] def run(self, dispatcher, tracker, domain): try: mobile_number = tracker.get_slot("mobile_number") if mobile_number and mobile_number.startswith('034'): mobile_number = mobile_number.replace('034', '9234') verification_status = tracker.get_slot("verification") if verification_status == "yes": balance = tracker.get_slot("current_balance_value") balance_validity = tracker.get_slot("current_balance_validity") response = "Your current balance is: Rs. {balance}, validity: {balance_validity}".format( balance=balance, balance_validity=balance_validity) dispatcher.utter_message(response) return None except Exception as e: dispatcher.utter_message("Sorry, Something went wrong. Apna swal dobara enter krain") print("\nERROR: Something went wrong with BalanceInformation: {}".format(e)) return None # Intent: balance_info.info class BalanceExpiry(Action): def name(self): return 'action_balance_expiry' def run(self, dispatcher, tracker, domain): try: # check if balance slot is filled: # e.g. "expiry", "balance expiry", "akhri tareekh" ?? mobile_number = tracker.get_slot("mobile_number") if mobile_number and mobile_number.startswith('034'): mobile_number = mobile_number.replace('034', '9234') verification_status = tracker.get_slot("verification") if verification_status == "yes": balance_validity = tracker.get_slot("current_balance_validity") response = "Your balance is valid till: {balance_validity}".format(balance_validity=balance_validity) dispatcher.utter_message(response) except Exception as e: dispatcher.utter_message("Sorry, Something went wrong. 
Apna swal dobara enter krain") print("\nERROR: Something went wrong with BalanceExpiry: {}".format(e)) return None # Intent: balance.balance_usage class BalanceUsage(Action): def name(self): return 'action_balance_usage_report' def run(self, dispatcher, tracker, domain): try: mobile_number = tracker.get_slot("mobile_number") if mobile_number and mobile_number.startswith('034'): mobile_number = mobile_number.replace('034', '9234') verification_status = tracker.get_slot("verification") if verification_status == "yes": balance_usage = tracker.get_slot("current_balance_usage") response = "Your total balance usage till now is Rs.{balance_usage}.".format(balance_usage=balance_usage) dispatcher.utter_message(response) except Exception as e: dispatcher.utter_message("Sorry, Something went wrong. Apna swal dobara enter krain") print("\nERROR: Something went wrong with BalanceExpiry: {}".format(e)) return None ########################## # LAST RECHARGE ########################### # Intent: last_recharge.info class LastRechargeInfo(Action): def name(self): return 'action_last_recharge_info' def run(self, dispatcher, tracker, domain): try: mobile_number = tracker.get_slot("mobile_number") if mobile_number and mobile_number.startswith('034'): mobile_number = mobile_number.replace('034', '9234') verification_status = tracker.get_slot("verification") if verification_status == "yes": balance = tracker.get_slot("current_balance_value") balance_validity = tracker.get_slot("current_balance_validity") response = """Last time you recharged {last_recharge}. Your current balance is Rs. {balance}, validity {balance_validity}""".format( last_recharge=single_user_object['last_recharge'], balance=balance, balance_validity=balance_validity ) dispatcher.utter_message(response) except Exception as e: dispatcher.utter_message("Sorry, Something went wrong. Apna swal dobara enter krain") print("\nERROR: Something went wrong with LastRechargeInfo: {}".format(e)) return None # Intent: last_recharge.date class LastRechargeDate(Action): def name(self): return 'action_last_recharge_date' def run(self, dispatcher, tracker, domain): try: mobile_number = tracker.get_slot("mobile_number") if mobile_number and mobile_number.startswith('034'): mobile_number = mobile_number.replace('034', '9234') verification_status = tracker.get_slot("verification") if verification_status == "yes": response = "Last time you recharged {last_recharge}.".format( last_recharge=single_user_object['last_recharge']) dispatcher.utter_message(response) except Exception as e: dispatcher.utter_message("Sorry, Something went wrong. Apna swal dobara enter krain") print("\nERROR: Something went wrong with LastRechargeDate: {}".format(e)) return None ########################### # EMERGENCY LOAN ########################### # Intent: emergency_loan.subscribe class EmergencyLoanCurrentStatus(Action): def name(self): return 'action_emergency_loan_current_status' def run(self, dispatcher, tracker, domain): try: mobile_number = tracker.get_slot("mobile_number") if mobile_number and mobile_number.startswith('034'): mobile_number = mobile_number.replace('034', '9234') verification_status = tracker.get_slot("verification") if verification_status == "yes": loan_service_status = tracker.get_slot("loan_service_status") response = """Sorry, You haven't returned previous loan of Rs.20. You can not take more emergency balance till next recharge.""" if loan_service_status == "unsubscribed": response = "You haven't subscribed loan. Reply 'YES' to get emergency balance of Rs.20." 
dispatcher.utter_message(response) except Exception as e: dispatcher.utter_message("Sorry, Something went wrong. Apna swal dobara enter krain") print("\nERROR: Something went wrong with EmergencyLoanCurrentStatus: {}".format(e)) return None # Intent: emergency_loan.subscribe class EmergencyLoanSubscribe(Action): def name(self): return 'action_emergency_loan_subscribe' def run(self, dispatcher, tracker, domain): try: mobile_number = tracker.get_slot("mobile_number") if mobile_number and mobile_number.startswith('034'): mobile_number = mobile_number.replace('034', '9234') verification_status = tracker.get_slot("verification") if verification_status == "yes": loan_service_status = tracker.get_slot("loan_service_status") loan_last_availed = tracker.get_slot("loan_last_availed") current_balance = tracker.get_slot("current_balance_value") # balance_usage = tracker.get_slot("current_balance_usage") balance_validity = tracker.get_slot("current_balance_validity") response = "Sorry, ap ne pichla emergency loan wapis nhi kia abi. Ap mazeed loan nhi hasil kr sakty!" if loan_service_status == "unsubscribed": loan_service_status = "subscribed" loan_last_availed = str(date.today()) response = "You have successfully availed emergency loan of Rs.20" current_balance += 20.0 balance_validity = "15-01-2020 " dispatcher.utter_message(response) return [SlotSet("loan_service_status", loan_service_status), SlotSet("loan_last_availed", loan_last_availed), SlotSet("current_balance_value", current_balance), SlotSet("current_balance_validity", balance_validity)] except Exception as e: dispatcher.utter_message("Sorry, Something went wrong. Apna swal dobara enter krain") print("\nERROR: Something went wrong with EmergencyLoanSubscribe: {}".format(e)) return None # Intent: emergency_loan.subscribe class EmergencyLoanLastAvailed(Action): def name(self): return 'action_emergency_loan_last_availed' def run(self, dispatcher, tracker, domain): try: mobile_number = tracker.get_slot("mobile_number") if mobile_number and mobile_number.startswith('034'): mobile_number = mobile_number.replace('034', '9234') verification_status = tracker.get_slot("verification") if verification_status == "yes": loan_last_availed = tracker.get_slot("loan_last_availed") response = "You availed emergency loan of Rs.20 on {}.".format(loan_last_availed) dispatcher.utter_message(response) except Exception as e: dispatcher.utter_message("Sorry, Something went
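All of the Rasa actions above share one shape: subclass rasa_sdk.Action, report a name(), read slots from the tracker inside run(), reply through the dispatcher, and optionally return SlotSet events. The sketch below condenses that shape, including the '034' to '9234' number normalisation repeated in each action; the action name, the normalize_msisdn helper, and the exact slot names are illustrative assumptions, not part of the deployed bot.

from rasa_sdk import Action
from rasa_sdk.events import SlotSet


def normalize_msisdn(mobile_number):
    """Rewrite a local '034...' number to the '9234...' form used as a lookup key above."""
    if mobile_number and mobile_number.startswith("034"):
        return mobile_number.replace("034", "9234", 1)
    return mobile_number


class ActionCheckBalanceSketch(Action):
    """Verification-gated slot lookup, mirroring the actions above."""

    def name(self):
        return "action_check_balance_sketch"

    def run(self, dispatcher, tracker, domain):
        mobile_number = normalize_msisdn(tracker.get_slot("mobile_number"))
        if tracker.get_slot("verification") != "yes":
            dispatcher.utter_message("Please verify your number first.")
            return []
        balance = tracker.get_slot("current_balance_value")
        dispatcher.utter_message("Balance for {}: Rs. {}".format(mobile_number, balance))
        # Persist the normalised number so later actions see the '9234...' form.
        return [SlotSet("mobile_number", mobile_number)]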
<gh_stars>0 # # Copyright (c) 2008-2015 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response from nssrc.com.citrix.netscaler.nitro.service.options import options from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util class lbvserver(base_resource) : """ Configuration for Load Balancing Virtual Server resource. """ def __init__(self) : self._name = "" self._servicetype = "" self._ipv46 = "" self._ippattern = "" self._ipmask = "" self._port = 0 self._range = 0 self._persistencetype = "" self._timeout = 0 self._persistencebackup = "" self._backuppersistencetimeout = 0 self._lbmethod = "" self._hashlength = 0 self._netmask = "" self._v6netmasklen = 0 self._cookiename = "" self._rule = "" self._listenpolicy = "" self._listenpriority = 0 self._resrule = "" self._persistmask = "" self._v6persistmasklen = 0 self._pq = "" self._sc = "" self._rtspnat = "" self._m = "" self._tosid = 0 self._datalength = 0 self._dataoffset = 0 self._sessionless = "" self._state = "" self._connfailover = "" self._redirurl = "" self._cacheable = "" self._clttimeout = 0 self._somethod = "" self._sopersistence = "" self._sopersistencetimeout = 0 self._healththreshold = 0 self._sothreshold = 0 self._sobackupaction = "" self._redirectportrewrite = "" self._downstateflush = "" self._backupvserver = "" self._disableprimaryondown = "" self._insertvserveripport = "" self._vipheader = "" self._authenticationhost = "" self._authentication = "" self._authn401 = "" self._authnvsname = "" self._push = "" self._pushvserver = "" self._pushlabel = "" self._pushmulticlients = "" self._tcpprofilename = "" self._httpprofilename = "" self._dbprofilename = "" self._comment = "" self._l2conn = "" self._oracleserverversion = "" self._mssqlserverversion = "" self._mysqlprotocolversion = 0 self._mysqlserverversion = "" self._mysqlcharacterset = 0 self._mysqlservercapabilities = 0 self._appflowlog = "" self._netprofile = "" self._icmpvsrresponse = "" self._rhistate = "" self._newservicerequest = 0 self._newservicerequestunit = "" self._newservicerequestincrementinterval = 0 self._minautoscalemembers = 0 self._maxautoscalemembers = 0 self._persistavpno = [] self._skippersistency = "" self._td = 0 self._authnprofile = "" self._macmoderetainvlan = "" self._dbslb = "" self._dns64 = "" self._bypassaaaa = "" self._recursionavailable = "" self._processlocal = "" self._weight = 0 self._servicename = "" self._redirurlflags = False self._newname = "" self._value = "" self._ipmapping = "" self._ngname = "" self._type = "" self._curstate = "" self._effectivestate = "" self._status = 0 self._lbrrreason = 0 self._redirect = "" self._precedence = "" self._homepage = "" self._dnsvservername = "" self._domain = "" self._policyname = "" self._cachevserver = "" self._health = 0 
self._gotopriorityexpression = "" self._ruletype = 0 self._groupname = "" self._cookiedomain = "" self._map = "" self._gt2gb = "" self._consolidatedlconn = "" self._consolidatedlconngbl = "" self._thresholdvalue = 0 self._bindpoint = "" self._invoke = False self._labeltype = "" self._labelname = "" self._version = 0 self._totalservices = 0 self._activeservices = 0 self._statechangetimesec = "" self._statechangetimeseconds = 0 self._statechangetimemsec = 0 self._tickssincelaststatechange = 0 self._isgslb = False self._vsvrdynconnsothreshold = 0 self.___count = 0 @property def name(self) : """Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created. CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or 'my vserver'). .<br/>Minimum length = 1. """ try : return self._name except Exception as e: raise e @name.setter def name(self, name) : """Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created. CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or 'my vserver'). .<br/>Minimum length = 1 """ try : self._name = name except Exception as e: raise e @property def servicetype(self) : """Protocol used by the service (also called the service type).<br/>Possible values = HTTP, FTP, TCP, UDP, SSL, SSL_BRIDGE, SSL_TCP, DTLS, NNTP, DNS, DHCPRA, ANY, SIP_UDP, DNS_TCP, RTSP, PUSH, SSL_PUSH, RADIUS, RDP, MYSQL, MSSQL, DIAMETER, SSL_DIAMETER, TFTP, ORACLE. """ try : return self._servicetype except Exception as e: raise e @servicetype.setter def servicetype(self, servicetype) : """Protocol used by the service (also called the service type).<br/>Possible values = HTTP, FTP, TCP, UDP, SSL, SSL_BRIDGE, SSL_TCP, DTLS, NNTP, DNS, DHCPRA, ANY, SIP_UDP, DNS_TCP, RTSP, PUSH, SSL_PUSH, RADIUS, RDP, MYSQL, MSSQL, DIAMETER, SSL_DIAMETER, TFTP, ORACLE """ try : self._servicetype = servicetype except Exception as e: raise e @property def ipv46(self) : """IPv4 or IPv6 address to assign to the virtual server. """ try : return self._ipv46 except Exception as e: raise e @ipv46.setter def ipv46(self, ipv46) : """IPv4 or IPv6 address to assign to the virtual server. """ try : self._ipv46 = ipv46 except Exception as e: raise e @property def ippattern(self) : """IP address pattern, in dotted decimal notation, for identifying packets to be accepted by the virtual server. The IP Mask parameter specifies which part of the destination IP address is matched against the pattern. Mutually exclusive with the IP Address parameter. For example, if the IP pattern assigned to the virtual server is 198.51.100.0 and the IP mask is 255.255.240.0 (a forward mask), the first 20 bits in the destination IP addresses are matched with the first 20 bits in the pattern. The virtual server accepts requests with IP addresses that range from 172.16.58.3 to 172.16.31.10. You can also use a pattern such as 0.0.2.2 and a mask such as 0.0.255.255 (a reverse mask). 
If a destination IP address matches more than one IP pattern, the pattern with the longest match is selected, and the associated virtual server processes the request. For example, if virtual servers vs1 and vs2 have the same IP pattern, 0.0.100.128, but different IP masks of 0.0.255.255 and 0.0.224.255, a destination IP address of 198.51.100.128 has the longest match with the IP pattern of vs1. If a destination IP address matches two or more virtual servers to the same extent, the request is processed by the virtual server whose port number matches the port number in the request. """ try : return self._ippattern except Exception as e: raise e @ippattern.setter def ippattern(self, ippattern) : """IP address pattern, in dotted decimal notation, for identifying packets to be accepted by the virtual server. The IP Mask parameter specifies which part of the destination IP address is matched against the pattern. Mutually exclusive with the IP Address parameter. For example, if the IP pattern assigned to the virtual server is 198.51.100.0 and the IP mask is 255.255.240.0 (a forward mask), the first 20 bits in the destination IP addresses are matched with the first 20 bits in the pattern. The virtual server accepts requests with IP addresses that range from 172.16.58.3 to 172.16.31.10. You can also use a pattern such as 0.0.2.2 and a mask such as 0.0.255.255 (a reverse mask). If a destination IP address matches more than one IP pattern, the pattern with the longest match is selected, and the associated virtual server processes the request. For example, if virtual servers vs1 and vs2 have the same IP pattern, 0.0.100.128, but different IP masks of 0.0.255.255 and 0.0.224.255, a destination IP address of 198.51.100.128 has the longest match with the IP pattern of vs1. If a destination IP address matches two or more virtual servers to the same extent, the request is processed by the virtual server whose port number matches the port number in the request. """ try : self._ippattern = ippattern except Exception as e: raise e @property def ipmask(self) : """IP mask, in dotted decimal notation, for the IP Pattern parameter. Can have leading or trailing non-zero octets (for example, 255.255.240.0 or 0.0.255.255). Accordingly, the mask specifies whether the first n bits or the last n bits of the destination IP address in a client request are to be matched with the corresponding bits in the IP pattern. The former is called a forward mask. The latter is called a reverse mask. """ try : return self._ipmask except Exception as e: raise e @ipmask.setter def ipmask(self, ipmask) : """IP mask, in dotted decimal notation, for the IP Pattern parameter. Can have leading or trailing non-zero octets (for example, 255.255.240.0 or 0.0.255.255). Accordingly, the mask specifies whether the first n bits or the last n bits of the destination IP address in a client request are to be matched with the corresponding bits in the IP pattern. The former is called a forward mask. The latter is called a reverse mask. """ try : self._ipmask = ipmask except Exception as e: raise e @property def port(self) : """Port number for the virtual server.<br/>Range 1 - 65535. """ try : return self._port except Exception as e: raise e @port.setter def port(self, port)
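The generated NITRO resource classes such as lbvserver above repeat one pattern per field: a private attribute initialised in __init__, exposed through a property getter/setter pair that carries the field's documentation. The toy class below illustrates only that pattern; it is not part of the NITRO SDK, and the simple range check on port stands in for the SDK's generated try/except wrappers.

class toy_resource(object):
    """Illustrative stand-in for a generated NITRO configuration resource."""

    def __init__(self):
        self._name = ""
        self._port = 0

    @property
    def name(self):
        """Name for the resource. Minimum length = 1."""
        return self._name

    @name.setter
    def name(self, name):
        self._name = name

    @property
    def port(self):
        """Port number for the resource. Range 1 - 65535."""
        return self._port

    @port.setter
    def port(self, port):
        if not 1 <= port <= 65535:
            raise ValueError("port must be in the range 1 - 65535")
        self._port = port


# Usage: attributes are set on the object before it is handed to the NITRO service layer.
vserver = toy_resource()
vserver.name = "lb_vip_1"
vserver.port = 80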
# coding: utf-8 import math import webbrowser import wx from contextlib import contextmanager from concurrent.futures import Future from functools import partial from pathlib import Path from bookworm import typehints as t from bookworm import app from bookworm import config from bookworm import speech from bookworm.concurrency import threaded_worker, CancellationToken from bookworm.resources import sounds, app_icons from bookworm.paths import app_path, fonts_path from bookworm.document import ( DummyDocument, DocumentRestrictedError, ArchiveContainsNoDocumentsError, ArchiveContainsMultipleDocuments, ) from bookworm.structured_text import Style, SEMANTIC_ELEMENT_OUTPUT_OPTIONS from bookworm.reader import ( EBookReader, UriResolver, ReaderError, ResourceDoesNotExist, UnsupportedDocumentError, DecryptionRequired, ) from bookworm.signals import ( reader_book_loaded, reader_book_unloaded, reading_position_change, ) from bookworm.structured_text import TextRange from bookworm.gui.contentview_ctrl import ContentViewCtrl from bookworm.gui.components import TocTreeManager, AsyncSnakDialog from bookworm.utils import gui_thread_safe from bookworm.logger import logger from . import recents_manager from .menubar import MenubarProvider, BookRelatedMenuIds from .state import StateProvider from .navigation import NavigationProvider log = logger.getChild(__name__) # Style to wx TextCtrl Styles STYLE_TO_WX_TEXT_ATTR_STYLES = { Style.BOLD: (wx.TextAttr.SetFontWeight, (wx.FONTWEIGHT_BOLD,)), Style.ITALIC: (wx.TextAttr.SetFontStyle, (wx.FONTSTYLE_ITALIC,)), Style.MONOSPACED: (wx.TextAttr.SetFontStyle, (wx.FONTSTYLE_ITALIC,)), Style.UNDERLINED: (wx.TextAttr.SetFontUnderlined, (True,)), Style.STRIKETHROUGH: ( wx.TextAttr.SetTextEffects, (wx.TEXT_ATTR_EFFECT_STRIKETHROUGH,), ), Style.SUPERSCRIPT: (wx.TextAttr.SetTextEffects, (wx.TEXT_ATTR_EFFECT_SUPERSCRIPT,)), Style.SUBSCRIPT: (wx.TextAttr.SetTextEffects, (wx.TEXT_ATTR_EFFECT_SUBSCRIPT,)), Style.HIGHLIGHTED: (wx.TextAttr.SetBackgroundColour, (wx.YELLOW,)), Style.DISPLAY_1: (wx.TextAttr.SetFontWeight, (800,)), Style.DISPLAY_2: (wx.TextAttr.SetFontWeight, (600,)), Style.DISPLAY_3: (wx.TextAttr.SetFontWeight, (400,)), Style.DISPLAY_4: (wx.TextAttr.SetFontWeight, (200,)), } class ResourceLoader: """Loads a document into the view.""" def __init__(self, view, uri, callback=None): self.view = view self.callback = callback self._cancellation_token = CancellationToken() self.init_resolver(uri) def init_resolver(self, uri): try: resolver = UriResolver(uri) except ReaderError as e: log.exception(f"Failed to resolve document uri: {uri}", exc_info=True) self.view.notify_user( _("Failed to open document"), _( "The document you are trying to open could not be opened in Bookworm." ), icon=wx.ICON_ERROR, ) return if not resolver.should_read_async(): doc = self.resolve_document(resolver.read_document, uri) else: AsyncSnakDialog( task=partial(resolver.read_document), done_callback=lambda fut: self.resolve_document(fut.result, uri), dismiss_callback=lambda: self._cancellation_token.request_cancellation() or True, message=_("Opening document, please wait..."), parent=self.view, ) def resolve_document(self, resolve_doc_func, uri): _last_exception = None try: doc = resolve_doc_func() if doc is not None: self.load(doc) except DecryptionRequired: self.view.decrypt_document(uri) except ResourceDoesNotExist as e: _last_exception = e log.exception("Failed to open file. 
File does not exist", exc_info=True) wx.CallAfter( self.view.notify_user, # Translators: the title of an error message _("Document not found"), # Translators: the content of an error message _("Could not open Document.\nThe document does not exist."), icon=wx.ICON_ERROR, ) except DocumentRestrictedError as e: _last_exception = e log.exception("Failed to open document. The document is restricted by the author.", exc_info=True) wx.CallAfter( self.view.notify_user, # Translators: the title of an error message _("Document Restricted"), # Translators: the content of an error message _("Could not open Document.\nThe document is restricted by the publisher."), icon=wx.ICON_ERROR, ) except UnsupportedDocumentError as e: _last_exception = e log.exception("Unsupported file format", exc_info=True) wx.CallAfter( self.view.notify_user, # Translators: the title of a message shown # when the format of the e-book is not supported _("Unsupported Document Format"), # Translators: the content of a message shown # when the format of the e-book is not supported _("The format of the given document is not supported by Bookworm."), icon=wx.ICON_WARNING, ) except ArchiveContainsNoDocumentsError as e: _last_exception = e log.exception("Archive contains no documents", exc_info=True) wx.CallAfter( self.view.notify_user, # Translators: the title of an error message _("Archive contains no documents"), # Translators: the content of an error message _( "Bookworm cannot open this archive file.\nThe archive contains no documents." ), icon=wx.ICON_ERROR, ) except ArchiveContainsMultipleDocuments as e: log.info("Archive contains multiple documents") dlg = wx.SingleChoiceDialog( self.view, _("Documents"), _("Multiple documents found"), e.args[0], wx.CHOICEDLG_STYLE, ) if dlg.ShowModal() == wx.ID_OK: member = dlg.GetStringSelection() new_uri = uri.create_copy( view_args={'member': member} ) self.view.open_uri(new_uri) except ReaderError as e: _last_exception = e log.exception("Unsupported file format", exc_info=True) wx.CallAfter( self.view.notify_user, # Translators: the title of an error message _("Error Opening Document"), # Translators: the content of an error message _( "Could not open file\n." "Either the file has been damaged during download, " "or it has been corrupted in some other way." ), icon=wx.ICON_ERROR, ) except Exception as e: _last_exception = e log.exception("Unknown error occurred", exc_info=True) wx.CallAfter( self.view.notify_user, # Translators: the title of an error message _("Error Openning Document"), # Translators: the content of an error message _( "Could not open document.\n" "An unknown error occurred while loading the file." 
), icon=wx.ICON_ERROR, ) finally: if _last_exception is not None: wx.CallAfter(self.view.unloadCurrentEbook) if uri.view_args.get('from_list'): retval = wx.MessageBox( # Translators: content of a message _("Failed to open document.\nWould you like to remove its entry from the 'recent documents' and 'pinned documents' lists?"), # Translators: title of a message box _("Remove from lists?"), style=wx.YES_NO | wx.ICON_WARNING ) if retval == wx.YES: recents_manager.remove_from_recents(uri) recents_manager.remove_from_pinned(uri) self.view.fileMenu.populate_recent_file_list() self.view.fileMenu.populate_pinned_documents_list() if app.debug: raise _last_exception def load(self, document): if (document is None) or (self._cancellation_token.is_cancellation_requested()): return self.view.load_document(document) if self.callback is not None: self.callback() class BookViewerWindow(wx.Frame, MenubarProvider, StateProvider): """The book viewer window.""" def __init__(self, parent, title): wx.Frame.__init__(self, parent, -1, title, name="main_window") self.setFrameIcon() self.reader = EBookReader(self) self._book_loaded_handlers = [] self.createControls() self.toolbar = self.CreateToolBar() self.toolbar.SetWindowStyle( wx.TB_FLAT | wx.TB_HORIZONTAL | wx.NO_BORDER | wx.TB_TEXT ) self.statusBar = self.CreateStatusBar() self._nav_provider = NavigationProvider( ctrl=self.contentTextCtrl, reader=self.reader, zoom_callback=self.onTextCtrlZoom, view=self, ) # A timer to save the current position to the database self.userPositionTimer = wx.Timer(self) # Bind Events self.Bind(wx.EVT_TIMER, self.onUserPositionTimerTick, self.userPositionTimer) self.tocTreeCtrl.Bind(wx.EVT_SET_FOCUS, self.onTocTreeFocus, self.tocTreeCtrl) self.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.onTOCItemClick, self.tocTreeCtrl) self.Bind( wx.EVT_TOOL, lambda e: self.onTextCtrlZoom(-1), id=wx.ID_PREVIEW_ZOOM_OUT ) self.Bind( wx.EVT_TOOL, lambda e: self.onTextCtrlZoom(1), id=wx.ID_PREVIEW_ZOOM_IN ) self.toc_tree_manager = TocTreeManager(self.tocTreeCtrl) # Set status bar text # Translators: the text of the status bar when no book is currently open. # It is being used also as a label for the page content text area when no book is opened. self._no_open_book_status = _("Press (Ctrl + O) to open a document") self._has_text_zoom = False self.__latest_structured_navigation_position = None self.set_status(self._no_open_book_status) StateProvider.__init__(self) MenubarProvider.__init__(self) def createControls(self): # Now create the Panel to put the other controls on. 
rect = wx.GetClientDisplayRect() panel = wx.Panel(self, size=(rect.width * 0.8, rect.height * 0.75)) # Create the book reader controls # Translators: the label of the table-of-contents tree tocTreeLabel = wx.StaticText(panel, -1, _("Table of Contents")) self.tocTreeCtrl = wx.TreeCtrl( panel, size=(280, 160), style=wx.TR_TWIST_BUTTONS | wx.TR_LINES_AT_ROOT | wx.TR_FULL_ROW_HIGHLIGHT | wx.TR_SINGLE | wx.TR_ROW_LINES, name="toc_tree", ) # Translators: the label of the text area which shows the # content of the current page self.contentTextCtrlLabel = wx.StaticText(panel, -1, _("Content")) self.contentTextCtrl = ContentViewCtrl( panel, size=(200, 160), name="content_view", ) self.contentTextCtrl.SetMargins(self._get_text_view_margins()) self.readingProgressBar = wx.Gauge( panel, -1, style=wx.GA_HORIZONTAL | wx.GA_SMOOTH ) # Use a sizer to layout the controls, stacked horizontally and with # a 10 pixel border around each mainSizer = wx.BoxSizer(wx.HORIZONTAL) lftSizer = wx.BoxSizer(wx.VERTICAL) rgtSizer = wx.BoxSizer(wx.VERTICAL) rgtBottomSizer = wx.BoxSizer(wx.HORIZONTAL) lftSizer.Add(tocTreeLabel, 0, wx.ALL, 5) lftSizer.Add(self.tocTreeCtrl, 1, wx.ALL, 5) rgtSizer.Add(self.contentTextCtrlLabel, 0, wx.EXPAND | wx.ALL, 5) rgtSizer.Add(self.contentTextCtrl, 1, wx.EXPAND | wx.ALL, 5) rgtBottomSizer.Add(self.readingProgressBar, 1, wx.EXPAND | wx.ALL, 1) rgtSizer.Add(rgtBottomSizer, 0, wx.ALL | wx.EXPAND, 4) mainSizer.Add(lftSizer, 0, wx.ALL | wx.EXPAND, 10) mainSizer.Add(rgtSizer, 1, wx.ALL | wx.EXPAND, 10) panel.SetSizer(mainSizer) panel.Layout() # And also use a sizer to manage the size of the panel such # that it fills the frame sizer = wx.BoxSizer() sizer.Add(panel, 1, wx.EXPAND) self.SetSizer(sizer) self.Fit() self.SetSize(self.GetSize()) self.CenterOnScreen(wx.BOTH) def finalize_gui_creation(self): opendyslexic_font_filename = fonts_path( "opendyslexic", "OpenDyslexic-Regular.ttf" ) wx.Font.AddPrivateFont(str(opendyslexic_font_filename)) self.set_content_view_font() self.add_tools() self.toolbar.Realize() # Process services menubar for retval in wx.GetApp().service_handler.process_menubar(self.menuBar): if retval is None: continue menu_order, menu_object, menu_label = retval self.registerMenu(menu_order, menu_object, menu_label) self.doAddMenus() self.SetMenuBar(self.menuBar) # Set accelerators for the menu items self._set_menu_accelerators() if config.conf["appearance"]["start_maximized"]: self.Maximize() # XXX sent explicitly to disable items upon startup reader_book_unloaded.send(self.reader) def set_content_view_font(self): configured_text_style = self.get_content_view_text_style() self.contentTextCtrl.SetStyle(0, self.contentTextCtrl.GetLastPosition(), configured_text_style) self.contentTextCtrl.SetDefaultStyle(configured_text_style) def get_content_view_text_style(self, *, font_size=None): finfo = wx.FontInfo().FaceName(config.conf["appearance"]["font_facename"]) configured_font = wx.Font(finfo) font_point_size = font_size if font_size is not None else config.conf["appearance"]["font_point_size"] configured_font.SetPointSize(font_point_size) if config.conf["appearance"]["use_bold_font"]: configured_font.SetWeight(wx.FONTWEIGHT_BOLD) base_text_style = self.contentTextCtrl.GetDefaultStyle() base_text_style.SetFont(configured_font) return base_text_style def add_tools(self): tsize = (16, 16) self.toolbar.SetToolBitmapSize(tsize) tool_info = [ # Translators: the label of a button in the application toolbar (0, "open", _("Open"), wx.ID_OPEN), (1, "", "", None), # Translators: the label of a 
button in the application toolbar (10, "search", _("Search"), wx.ID_FIND), # Translators: the label of a button in the application toolbar (20, "reading_mode", _("Mode"), BookRelatedMenuIds.changeReadingMode), (32, "", "", None), # Translators: the label of a button in the application toolbar (60, "zoom_out", _("Big"), wx.ID_PREVIEW_ZOOM_OUT), # Translators: the label of a button in the application toolbar (70, "zoom_in", _("Small"), wx.ID_PREVIEW_ZOOM_IN), (71, "", "", None), ] tool_info.extend(wx.GetApp().service_handler.get_toolbar_items()) tool_info.sort() for (pos, imagename, label, ident) in tool_info: if ident is None: self.toolbar.AddSeparator() continue image = getattr(app_icons, imagename).GetBitmap() # Add toolbar item self.toolbar.AddTool(ident, label, image) def add_load_handler(self, func): self._book_loaded_handlers.append(func) def invoke_load_handlers(self): for func in self._book_loaded_handlers: func(self.reader) def default_book_loaded_callback(self): self.userPositionTimer.Start(1200) if self.contentTextCtrl.HasFocus(): self.tocTreeCtrl.SetFocus() def load_document(self, document) ->
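# A minimal usage sketch (not part of Bookworm) for the load-handler hooks
# defined above: a service registers a callable with `add_load_handler`, and
# `invoke_load_handlers` later calls every registered callable with the
# `EBookReader` instance once a document has finished loading. The
# `announce_loaded_book` handler and the `view` variable are hypothetical.

def announce_loaded_book(reader):
    # A real handler might refresh toolbars or announce the document title.
    print("Document loaded; reader:", reader)

# view = BookViewerWindow(parent=None, title="Bookworm")
# view.add_load_handler(announce_loaded_book)
# ...after loading completes, the window itself calls:
# view.invoke_load_handlers()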
<gh_stars>1-10 # -*- coding: utf-8 -*- from django.conf import settings from django.db import models from django.db.models import Q, QuerySet from django.contrib.postgres.fields import ArrayField from django.utils.functional import cached_property from cms.models.pluginmodel import CMSPlugin from djangocms_text_ckeditor.fields import HTMLField from allink_core.core.utils import get_additional_templates, camelcase_to_separated_lowercase from allink_core.core.models.fields import CMSPluginField from allink_core.core_apps.allink_categories.models import AllinkCategory __all__ = [ 'AllinkBaseAppContentPlugin', 'AllinkBaseFormPlugin', 'AllinkBaseSearchPlugin', 'AllinkBaseSectionPlugin', ] class AllinkBaseAppContentPlugin(CMSPlugin): """ Base plugin which provides standard functionality all Content App-Plugins should inherit from this, to create a "app pointer plugin" - allows to display application content for different apps - ability to filter and sort entries - manually select entries (search entries and select/ sort) """ data_model = None # PAGINATION NO = 'no' LOAD = 'load' LOAD_REST = 'load_rest' LOAD_URL = 'load_url' # PAGES = 'pages' PAGINATION_TYPE = ( (NO, 'None'), (LOAD, 'Add "Load more"-Button'), (LOAD_REST, 'Add "Load all"-Button'), (LOAD_URL, 'Add "Custom URL"-Button'), # (PAGES, 'Page Navigation'), ) # COLUMN AMOUNT COLUMN_AMOUNT = ( (1, 1), (2, 2), (3, 3), (4, 4), ) # ORDERING DEFAULT = 'default' TITLE_ASC = 'title_asc' TITLE_DESC = 'title_desc' LATEST = 'latest' EARLIEST = 'earliest' CATEGORY = 'category' ORDERING = ( (DEFAULT, '---------'), (TITLE_ASC, 'A-Z (title)'), (TITLE_DESC, 'Z-A (title)'), (LATEST, 'latest first'), (EARLIEST, 'earliest first'), (CATEGORY, 'category ordering'), ) FILTERING = ( (DEFAULT, '---------'), ) # FIELDS categories = models.ManyToManyField( AllinkCategory, blank=True ) categories_and = models.ManyToManyField( AllinkCategory, blank=True, related_name='%(app_label)s_%(class)s_categories_and' ) manual_filtering = models.CharField( max_length=50, null=True, blank=True ) manual_ordering = models.CharField( max_length=50, null=True, blank=True ) # manual_entries -> defined in subclasses (no elegant way found to define this here.) # apphook_page -> defined in subclasses (no elegant way found to define this here.) template = models.CharField( 'Template', help_text='Choose a template.', max_length=50 ) category_navigation_enabled = models.BooleanField( 'Show category navigation', help_text='If checked, a filter navigation with all selected categories is displayed.' '<br>Please note: A category is only displayed if it contains items.', default=False ) category_navigation_all = models.BooleanField( 'Show category "all"', help_text='If checked, a category "all" in filter navigation is displayed.', default=False ) category_navigation = models.ManyToManyField( AllinkCategory, related_name='%(app_label)s_%(class)s_category_navigation', verbose_name='Categories for Navigation', help_text='You can explicitly define the categories for the category navigation here.' ' This will override the automatically set of categories' ' (either the one generated from "Filter & Ordering" or "Manual entries")', blank=True, ) softpage_enabled = models.BooleanField( 'Show detailed information in Softpage', help_text='If checked, the detail view of an entry will be displayed in a "softpage".' 
' Otherwise the page will be reloaded.', default=True ) detail_link_enabled = models.BooleanField( 'Show detail link', help_text='If checked, a link/button to the detail view will be displayed.', default=True ) items_per_row = models.IntegerField( 'Grid items per row', help_text='Only applied if a "Grid" template has been selected.', choices=COLUMN_AMOUNT, default=3 ) paginated_by = models.IntegerField( 'Max. entries per page', default=0, help_text='Limit the number of entries (in case of the "load more" pagination type: entries per page).' ' Default is "0" (show all entries)' ) pagination_type = models.CharField( 'Pagination Type', max_length=50, choices=PAGINATION_TYPE, default=PAGINATION_TYPE[0] ) load_more_button_text = models.CharField( 'Text for "Load .."-Button', help_text='If left blank, a default text will be used. <br>Note: Should the default text be adjusted site-wide,' ' please contact the project manager (such changes can be made on a code level)', # noqa max_length=255, null=True, blank=True ) detail_link_text = models.CharField( 'Text for "Detail"-Link', help_text='If left blank, a default text will be used.<br>Note: Should the default text be adjusted site-wide,' ' please contact the project manager (such changes can be made on a code level)', max_length=255, null=True, blank=True ) project_css_classes = ArrayField( models.CharField( max_length=50, blank=True, null=True ), blank=True, null=True ) cmsplugin_ptr = CMSPluginField() class Meta: abstract = True def __str__(self): return str(self.pk) @classmethod def get_templates(cls): templates = () for x, y in get_additional_templates(cls.data_model._meta.model_name): templates += ((x, y),) return templates @classmethod def get_filtering_choices(cls): return cls.FILTERING @classmethod def get_ordering_choices(cls): return cls.ORDERING @cached_property def css_classes(self): css_classes = [] if getattr(self, 'project_css_classes'): for css_class in getattr(self, 'project_css_classes'): css_classes.append(css_class) css_classes.append('{}-template'.format(self.template)) if self.template else None css_classes.append('items-per-row-{}'.format(self.items_per_row)) if self.items_per_row else None return ' '.join(css_classes) def copy_relations(self, oldinstance): for i in oldinstance.categories.all(): self.categories.add(i) for i in oldinstance.categories_and.all(): self.categories_and.add(i) for i in oldinstance.category_navigation.all(): self.category_navigation.add(i) for i in oldinstance.manual_entries.all(): self.manual_entries.add(i) def get_model_name(self): return self.data_model._meta.model_name def get_app_can_have_categories(self): if self.data_model._meta.model_name in dict(settings.PROJECT_APP_MODEL_WITH_CATEGORY_CHOICES): return True else: return False def get_correct_template(self, file): # file can only be '_items', 'content', 'no_results' if file != 'no_results': template = '{}/plugins/{}/{}.html'.format(self.data_model._meta.app_label, self.template, file) else: template = '{}/plugins/{}.html'.format(self.data_model._meta.app_label, file) return template @cached_property def fetch_categories(self): return self.categories.all() @cached_property def fetch_first_category(self): return self.categories.first() @cached_property def fetch_categories_and(self): return self.categories_and.all() @cached_property def fetch_manual_entries(self): return self.manual_entries.active() @cached_property def fetch_category_navigation(self): category_navigation = [] # if manual entries are selected the category navigation # is 
created from all distinct categories in selected entries if self.fetch_manual_entries: for entry in self.fetch_manual_entries: for category in entry.fetch_categories: if category not in category_navigation: category_navigation.append(category) else: # override auto category nav if self.category_navigation.exists(): for category in self.category_navigation.all(): if isinstance(self.get_render_queryset_for_display(category), QuerySet): if self.get_render_queryset_for_display(category): category_navigation.append(category) else: if len(self.get_render_queryset_for_display(category)): category_navigation.append(category) # auto category nav else: if self.fetch_categories: for category in self.fetch_categories: if len(self.get_render_queryset_for_display(category)) or \ self.get_render_queryset_for_display(category): category_navigation.append(category) # auto category nav, if no categories are specified else: from allink_core.core_apps.allink_categories.models import AllinkCategory categories = self.get_render_queryset_for_display().filter(~Q(categories=None)).values_list( 'categories') category_navigation = list(AllinkCategory.objects.filter(id__in=categories).distinct()) return category_navigation def _apply_filtering_to_queryset_for_display(self, queryset): """ applies individual query filters on given queryset. override in app instance (i.e. events) for custom filters see allink_core/apps/events/abstract_models.py for reference """ return queryset def _apply_ordering_to_queryset_for_display(self, queryset): language_code = self.language # latest if self.manual_ordering == AllinkBaseAppContentPlugin.LATEST: return queryset.latest() # earliest elif self.manual_ordering == AllinkBaseAppContentPlugin.EARLIEST: return queryset.earliest() # A-Z elif self.manual_ordering == AllinkBaseAppContentPlugin.TITLE_ASC: return queryset.title_asc(language_code) # Z-A elif self.manual_ordering == AllinkBaseAppContentPlugin.TITLE_DESC: return queryset.title_desc(language_code) # category elif self.manual_ordering == AllinkBaseAppContentPlugin.CATEGORY: # https://code.djangoproject.com/ticket/24218 # To remove duplicates in queryset and return a queryset instead # of a list as there can be further filtering return queryset.model.objects.filter(id__in=set(queryset.category().values_list('id', flat=True))) else: return queryset.distinct() def get_render_queryset_for_display(self, category=None): """ returns all data_model objects distinct to id which are in the selected categories - category: category instance -> adds additional query after refactoring: - category will be supplied via filters, request will be removed """ apply_ordering = True # manual entries if self.fetch_manual_entries: apply_ordering = False queryset = self.fetch_manual_entries else: queryset = self.data_model.objects.active() if self.fetch_categories or category: if category: queryset = queryset.filter_by_category(category) else: queryset = queryset.filter_by_categories(categories=self.fetch_categories) if self.fetch_categories_and: queryset = queryset.filter_by_categories(categories=self.fetch_categories_and) # apply filtering queryset = self._apply_filtering_to_queryset_for_display(queryset) # apply ordering if apply_ordering: queryset = self._apply_ordering_to_queryset_for_display(queryset) # hook for prefetching related queryset = self._get_queryset_with_prefetch_related(queryset) return queryset def _get_queryset_with_prefetch_related(self, ordered_qs): if type(ordered_qs) is list: return ordered_qs return 
ordered_qs.prefetch_related('translations', 'preview_image') class AllinkBaseSearchPlugin(CMSPlugin): data_model = None template = models.CharField( 'Template', help_text='Choose a template.', max_length=50, default='search_grid_static' ) project_css_classes = ArrayField( models.CharField( max_length=50, blank=True, null=True ), blank=True, null=True ) cmsplugin_ptr = CMSPluginField() class Meta: abstract = True def __str__(self): return str(self.pk) def get_model_name(self): return self.data_model._meta.model_name @cached_property def css_classes(self): css_classes = [] if getattr(self, 'project_css_classes'): for css_class in getattr(self, 'project_css_classes'): css_classes.append(css_class) return ' '.join(css_classes) @classmethod def get_templates(cls): templates = () for x, y in get_additional_templates('{}_SEARCH'.format(cls.data_model._meta.model_name)): templates += ((x, y),) return templates class AllinkBaseFormPlugin(CMSPlugin): form_text = HTMLField( 'Form text', blank=True, help_text='This text will be shown, before the actual form fields and later replaced with the success message.' ) email_subject = models.CharField( 'External email subject', max_length=255, blank=True, ) from_email_address = models.EmailField( 'From e-mail address', default=settings.DEFAULT_FROM_EMAIL, ) internal_recipients = ArrayField( models.EmailField( blank=True, ), blank=True, null=True, verbose_name='Internal e-mail recipients', ) success_message = HTMLField( 'Success message', blank=True, help_text='This text will be shown, after form completion.' ) cmsplugin_ptr = CMSPluginField() class Meta: abstract = True class AllinkBaseSectionPlugin(CMSPlugin): """ Base Plugin used for plugins which represent a section on a page. These plugins will be placed on the outer most plugin level. These plugins will be used to better control layout behavior in a more specific and more opinionated manner as the AllinkContentPlugin does. """ # overwrite the set of columns tuples on a per plugin level # (make sure a corresponding width_alias is defined in settings.py) COLUMNS = ( ('1-of-1', 'One Column'), ('1-of-2', 'Two Columns'), ('1-of-3', 'Three Columns'), ('1-of-4', 'Four Columns'), ) # overwrite the set of columns order tuples on a per plugin level COLUMN_ORDERS = ( ('default',
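# A minimal sketch, not part of allink_core, of the app-specific plugin model
# the base class above expects: a subclass points `data_model` at its app
# model and defines the `manual_entries` (and, where needed, `apphook_page`)
# fields that the base class deliberately leaves to subclasses. The `News`
# model and the exact field layout below are hypothetical.

from django.db import models


class AllinkNewsContentPlugin(AllinkBaseAppContentPlugin):
    data_model = News  # hypothetical app model exposing .objects.active()

    manual_entries = models.ManyToManyField(
        News,
        blank=True,
        related_name='%(app_label)s_%(class)s_manual_entries',
        help_text='Manually selected entries; when set, they take precedence '
                  'over "Filter & Ordering" in get_render_queryset_for_display().',
    )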
<filename>channels_graphql_ws/graphql_ws.py<gh_stars>0 # # coding: utf-8 # Copyright (c) 2019 DATADVANCE # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """GraphQL over WebSockets implementation with subscriptions. This module contains implementation of GraphQL WebSocket protocol. The implementation bases on the Graphene and the Channels 2. The `Subscription` class itself is a "creative" copy of `Mutation` class from the Graphene (`graphene/types/mutation.py`). The `GraphqlWsConsumer` is a Channels WebSocket consumer which maintains WebSocket connection with the client. Implementation assumes that client uses the protocol implemented by the library `subscription-transport-ws` (which is used by Apollo). """ # NOTE: The motivation is that currently there is no viable Python-based # GraphQL subscriptions implementation out of the box. Hopefully there # is a promising GraphQL WS https://github.com/graphql-python/graphql-ws # library by the Graphene authors. In particular this pull request # https://github.com/graphql-python/graphql-ws/pull/9 gives a hope that # implementation in the current file can be replaced with GraphQL WS one # day. # NOTE: Links based on which this functionality is implemented: # - Protocol description: # https://github.com/apollographql/subscriptions-transport-ws/blob/master/PROTOCOL.md # https://github.com/apollographql/subscriptions-transport-ws/blob/master/src/message-types.ts # - ASGI specification for WebSockets: # https://github.com/django/asgiref/blob/master/specs/www.rst#websocket # - GitHubGist with the root of inspiration: # https://gist.github.com/tricoder42/af3d0337c1b33d82c1b32d12bd0265ec import asyncio import collections import concurrent import dataclasses import functools import hashlib import inspect import logging import traceback import types import typing import weakref import asgiref.sync import channels.db import channels.generic.websocket as ch_websocket import channels.layers import django.core.serializers import django.db import graphene import graphene.types.objecttype import graphene.types.utils import graphene.utils.get_unbound_function import graphene.utils.props import graphql import graphql.error import graphql.execution.executors.asyncio import msgpack import promise import rx # Module logger. log = logging.getLogger(__name__) # WebSocket subprotocol used for the GraphQL. GRAPHQL_WS_SUBPROTOCOL = "graphql-ws" class Subscription(graphene.ObjectType): """Subscription type definition. Subclass this class to define a GraphQL subscription. 
The class works with `GraphqlWsConsumer` which maintains a WebSocket connection with the client. The subclass specifies the following methods. You can define each of them as a `@classmethod`, as a `@staticmethod`, or even as a regular method (like Graphene typically does). It shall work fine either way. NOTE, if you define the method as a regular method (not a classmethod or a staticmethod) you will receive the first argument (`payload`/`root`) into the `self` argument. [async] publish(payload, info, *args, **kwds): This method invoked each time subscription "triggers". Raising an exception here will lead to sending the notification with the error. To suppress the notification return `Subscription.SKIP`. Can be implemented both as a synchronous or as a coroutine function. In both cases a method runs in a worker thread from the GraphQL-processing threadpool. Required. Args: payload: The `payload` from the `broadcast` invocation. info: The value of `info.context` is a Channels websocket context with all the connection information. args, kwds: Values of the GraphQL subscription inputs. Returns: The same the any Graphene resolver returns. Returning a special object `Subscription.SKIP` indicates that this notification shall not be sent to the client at all. [async] subscribe(root, info, *args, **kwds): Called when client subscribes. Define this to do some extra work when client subscribes and to group subscriptions into different subscription groups. Method signature is the same as in other GraphQL "resolver" methods but it may return the subscription groups names to put the subscription into. Can be implemented both as a synchronous or as a coroutine function. In both cases a method runs in a worker thread from the GraphQL-processing threadpool. Optional. Args: root: Root resolver object. Typically `None`. info: The value of `info.context` is a Channels websocket context with all the connection information. args, kwds: Values of the GraphQL subscription inputs. Returns: The list or tuple of subscription group names this subscription instance belongs to. Later the subscription will trigger on publishes to any of that groups. If method returns None (default behavior) then the subscription is only put to the default group (the one which corresponds to the `Subscription` subclass). [async] unsubscribed(root, info, *args, **kwds): Called when client unsubscribes. Define this to be notified when client unsubscribes. Can be implemented both as a synchronous or as a coroutine function. In both cases a method runs in a worker thread from the GraphQL-processing threadpool. Args: root: Always `None`. info: The value of `info.context` is a Channels websocket context with all the connection information. args, kwds: Values of the GraphQL subscription inputs. The methods enlisted above receives "standard" set of GraphQL resolver arguments. The `info` field has `context` which can be used to transmit some useful payload between these methods. For example if `subscribe` sets `info.context.zen=42` then `publish` will have access to this value as `info.context.zen`. Static methods of subscription subclass: broadcast: Call this method to notify all subscriptions in the group. unsubscribe: Call this method to stop all subscriptions in the group. NOTE: If you call any of these methods from the asynchronous context then `await` the result of the call. """ # ----------------------------------------------------------------------- PUBLIC API # Return this from the `publish` to suppress the notification. 
SKIP = object() @classmethod def broadcast(cls, *, group=None, payload=None): """Call this method to notify all subscriptions in the group. This method can be called from both synchronous and asynchronous contexts. If you call it from the asynchronous context then you have to `await`. Args: group: Name of the subscription group which members must be notified. `None` means that all the subscriptions of type will be triggered. payload: The payload delivered to the `publish` handler. NOTE: The `payload` is serialized before sending to the subscription group. """ try: event_loop = asyncio.get_running_loop() except RuntimeError: pass else: if event_loop.is_running(): assert cls._from_coroutine(), ( "The eventloop is running so this call is going to return" " a coroutine object, but the function is called from" " a synchronous context, so you cannot simply 'await' the result!" " This may indicate a wrong usage. To force some particular" " behavior directly call 'broadcast_sync' or 'broadcast_async'." ) return cls.broadcast_async(group=group, payload=payload) return cls.broadcast_sync(group=group, payload=payload) @classmethod async def broadcast_async(cls, *, group=None, payload=None): """Asynchronous implementation of the `broadcast` method.""" # Offload to the thread cause it do DB operations and may work # slowly. db_sync_to_async = channels.db.database_sync_to_async # Manually serialize the `payload` to allow transfer of Django # models inside the `payload`. serialized_payload = await db_sync_to_async(Serializer.serialize)(payload) # Send the message to the Channels group. group = cls._group_name(group) group_send = channels.layers.get_channel_layer().group_send await group_send( group=group, message={ "type": "broadcast", "group": group, "payload": serialized_payload, }, ) @classmethod def broadcast_sync(cls, *, group=None, payload=None): """Synchronous implementation of the `broadcast` method.""" # Manually serialize the `payload` to allow transfer of Django # models inside the `payload`. serialized_payload = Serializer.serialize(payload) # Send the message to the Channels group. group = cls._group_name(group) group_send = asgiref.sync.async_to_sync( channels.layers.get_channel_layer().group_send ) group_send( group=group, message={ "type": "broadcast", "group": group, "payload": serialized_payload, }, ) @classmethod def unsubscribe(cls, *, group=None): """Call this method to stop all subscriptions in the group. This method can be called from both synchronous and asynchronous contexts. If you call it from the asynchronous context then you have to `await`. Args: group: Name of the subscription group which members must be unsubscribed. `None` means that all the client of the subscription will be unsubscribed. """ try: event_loop = asyncio.get_running_loop() except RuntimeError: pass else: if event_loop.is_running(): assert cls._from_coroutine(), ( "The eventloop is running so this call is going to return" " a coroutine object, but the function is called from" " a synchronous context,
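# A minimal sketch of how the Subscription class above is meant to be
# subclassed, assuming a chat-style application; the `OnNewChatMessage` type,
# the `chat_room` argument and the payload shape are illustrative only.

import graphene


class OnNewChatMessage(Subscription):
    """Notifies subscribed clients when a message is posted to a chat room."""

    # Field(s) of the notification payload delivered to the client.
    message = graphene.String()

    class Arguments:
        chat_room = graphene.String(required=True)

    @staticmethod
    def subscribe(root, info, chat_room):
        # Put the subscription into a per-room group so that broadcasts to
        # `group=chat_room` only reach clients watching that room.
        return [chat_room]

    @staticmethod
    def publish(payload, info, chat_room):
        # `payload` is whatever was passed to `broadcast`; return SKIP to
        # suppress the notification for this client.
        if not payload.get("text"):
            return OnNewChatMessage.SKIP
        return OnNewChatMessage(message=payload["text"])


# From application code (synchronous context), e.g. inside a mutation resolver:
# OnNewChatMessage.broadcast(group="room-42", payload={"text": "hello"})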
MJ_filter_1H_to_3_comon.append(Mj_filter_1H_Phase[row_1H]) MJ_filter_3H_to_1_comon.append(Mj_filter_3H_Phase[row_3H]) Day_count_MJ_filter_1H_3H.append(Filter_1H_day_count[row_1H] +Filter_3H_day_count[row_3H] ) count_n = count_n + 1 N_MJ_filter_1H_3H = count_n -1 #1N to 4N MJ_Phase_1N_to_4_comon = [] MJ_Phase_4N_to_1_comon = [] Day_count_MJ_Phase_1N_4N = [] count_n = 0 for row_1N, hh_1N in enumerate(HH_1N): if hh_1N == str(-1): break for row_4N, hh_4N in enumerate(HH_4N): if hh_1N == hh_4N: MJ_Phase_1N_to_4_comon.append(Mj_1N_Phase[row_1N]) MJ_Phase_4N_to_1_comon.append(Mj_4N_Phase[row_4N]) Day_count_MJ_Phase_1N_4N.append(Phase_1N_day_count[row_1N] +Phase_4N_day_count[row_4N] ) count_n = count_n + 1 print('length of 1n and 4 n:', len(MJ_Phase_1N_to_4_comon), len(MJ_Phase_4N_to_1_comon) ) N_MJ_Phase_1N_4N = count_n -1 #for filter MJ_filter_1N_to_4_comon = [] MJ_filter_4N_to_1_comon = [] Day_count_MJ_filter_1N_4N = [] count_n = 0 for row_1N, hh_1N in enumerate(HH_1N): if hh_1N == str(-1): break for row_4N, hh_4N in enumerate(HH_4N): if hh_1N == hh_4N: MJ_filter_1N_to_4_comon.append(Mj_filter_1N_Phase[row_1N]) MJ_filter_4N_to_1_comon.append(Mj_filter_4N_Phase[row_4N]) Day_count_MJ_filter_1N_4N.append(Filter_1N_day_count[row_1N] +Filter_4N_day_count[row_4N] ) count_n = count_n + 1 N_MJ_filter_1N_4N = count_n -1 #2N to 3N MJ_Phase_2N_to_3_comon = [] MJ_Phase_3N_to_2_comon = [] Day_count_MJ_Phase_2N_3N = [] count_n = 0 for row_2N, hh_2N in enumerate(HH_2N): if hh_2N == str(-1): break for row_3N, hh_3N in enumerate(HH_3N): if hh_2N == hh_3N: MJ_Phase_2N_to_3_comon.append(Mj_2N_Phase[row_2N]) MJ_Phase_3N_to_2_comon.append(Mj_3N_Phase[row_3N]) Day_count_MJ_Phase_2N_3N.append(Phase_2N_day_count[row_2N] +Phase_3N_day_count[row_3N] ) print(hh_2N,Mj_2N_Phase[row_2N],hh_3N,Mj_3N_Phase[row_3N]) count_n = count_n + 1 N_MJ_Phase_2N_3N = count_n -1 #for filter MJ_filter_2N_to_3_comon = [] MJ_filter_3N_to_2_comon = [] Day_count_MJ_filter_2N_3N = [] count_n = 0 for row_2N, hh_2N in enumerate(HH_2N): if hh_2N == str(-1): break for row_3N, hh_3N in enumerate(HH_3N): if hh_2N == hh_3N: MJ_filter_2N_to_3_comon.append(Mj_filter_2N_Phase[row_2N]) MJ_filter_3N_to_2_comon.append(Mj_filter_3N_Phase[row_3N]) print(hh_2N,Mj_filter_2N_Phase[row_2N],hh_3N,Mj_filter_3N_Phase[row_3N]) Day_count_MJ_filter_2N_3N.append(Filter_2N_day_count[row_2N] +Filter_3N_day_count[row_3N] ) count_n = count_n + 1 N_MJ_filter_2N_3N = count_n - 1 #2N to 3N ###################____________________HOOOD # for Phase MJ_Phase_2H_to_3_comon = [] MJ_Phase_3H_to_2_comon = [] Day_count_MJ_Phase_2H_3H = [] count_n = 0 for row_2H, hh_2H in enumerate(HH_2H): if hh_2H == str(-1): break for row_3H, hh_3H in enumerate(HH_3H): if hh_2H == hh_3H: MJ_Phase_2H_to_3_comon.append(Mj_2H_Phase[row_2H]) MJ_Phase_3H_to_2_comon.append(Mj_3H_Phase[row_3H]) Day_count_MJ_Phase_2H_3H.append(Phase_2H_day_count[row_2H] +Phase_3H_day_count[row_3H] ) count_n = count_n + 1 N_MJ_Phase_2H_3H = count_n -1 #for filter MJ_filter_2H_to_3_comon = [] MJ_filter_3H_to_2_comon = [] Day_count_MJ_filter_2H_3H = [] count_n = 0 for row_2H, hh_2H in enumerate(HH_2H): if hh_2H == str(-1): break for row_3H, hh_3H in enumerate(HH_3H): if hh_2H == hh_3H: MJ_filter_2H_to_3_comon.append(Mj_filter_2H_Phase[row_2H]) MJ_filter_3H_to_2_comon.append(Mj_filter_3H_Phase[row_3H]) Day_count_MJ_filter_2H_3H.append(Filter_2H_day_count[row_2H] +Filter_3H_day_count[row_3H] ) count_n = count_n + 1 N_MJ_filter_2H_3H = count_n - 1 #2N to 4N MJ_Phase_2N_to_4_comon = [] MJ_Phase_4N_to_2_comon = [] 
Day_count_MJ_Phase_2N_4N = [] count_n = 0 for row_2N, hh_2N in enumerate(HH_2N): if hh_2N == str(-1): break for row_4N, hh_4N in enumerate(HH_4N): if hh_2N == hh_4N: MJ_Phase_2N_to_4_comon.append(Mj_2N_Phase[row_2N]) MJ_Phase_4N_to_2_comon.append(Mj_4N_Phase[row_4N]) Day_count_MJ_Phase_2N_4N.append(Phase_2N_day_count[row_2N] +Phase_4N_day_count[row_4N] ) count_n = count_n + 1 N_MJ_Phase_2N_4N = count_n -1 #for filter MJ_filter_2N_to_4_comon = [] MJ_filter_4N_to_2_comon = [] Day_count_MJ_filter_2N_4N = [] count_n = 0 for row_2N, hh_2N in enumerate(HH_2N): if hh_2N == str(-1): break for row_4N, hh_4N in enumerate(HH_4N): if hh_2N == hh_4N: MJ_filter_2N_to_4_comon.append(Mj_filter_2N_Phase[row_2N]) MJ_filter_4N_to_2_comon.append(Mj_filter_4N_Phase[row_4N]) Day_count_MJ_filter_2N_4N.append(Filter_2N_day_count[row_2N] +Filter_4N_day_count[row_4N] ) count_n = count_n + 1 N_MJ_filter_2N_4N = count_n - 1 #3N to 4N MJ_Phase_3N_to_4_comon = [] MJ_Phase_4N_to_3_comon = [] Day_count_MJ_Phase_3N_4N = [] count_n = 0 for row_3N, hh_3N in enumerate(HH_3N): if hh_3N == str(-1): break for row_4N, hh_4N in enumerate(HH_4N): if hh_3N == hh_4N: MJ_Phase_3N_to_4_comon.append(Mj_3N_Phase[row_3N]) MJ_Phase_4N_to_3_comon.append(Mj_4N_Phase[row_4N]) Day_count_MJ_Phase_3N_4N.append(Phase_3N_day_count[row_3N] +Phase_4N_day_count[row_4N] ) count_n = count_n + 1 N_MJ_Phase_3N_4N = count_n -1 #for filter MJ_filter_3N_to_4_comon = [] MJ_filter_4N_to_3_comon = [] Day_count_MJ_filter_3N_4N = [] count_n = 0 for row_3N, hh_3N in enumerate(HH_3N): if hh_3N == str(-1): break for row_4N, hh_4N in enumerate(HH_4N): if hh_3N == hh_4N: MJ_filter_3N_to_4_comon.append(Mj_filter_3N_Phase[row_3N]) MJ_filter_4N_to_3_comon.append(Mj_filter_4N_Phase[row_4N]) Day_count_MJ_filter_3N_4N.append(Filter_3N_day_count[row_3N] +Filter_4N_day_count[row_4N] ) count_n = count_n + 1 N_MJ_filter_3N_4N = count_n - 1 T_stat_1N_2N, P_val_1N_2N = scipy.stats.ttest_ind(MJ_Phase_1N_to_2_comon,MJ_Phase_2N_to_1_comon, axis=0, equal_var=True) degree_1N_2N = (N_MJ_Phase_1N_2N -1) *Level_of_confidence if degree_1N_2N < abs(T_stat_1N_2N): print('1N and 2N Phase rejects the null', T_stat_1N_2N,'P-value', P_val_1N_2N,'Sample size N', N_MJ_Phase_1N_2N) else: print('1N and 2N Phase accepts the null', T_stat_1N_2N,'P-value', P_val_1N_2N,'Sample size N', N_MJ_Phase_1N_2N) T_sign_1N_2N, P_sign_1N_2N = scipy.stats.wilcoxon(MJ_Phase_1N_to_2_comon, MJ_Phase_2N_to_1_comon) T_stat_1N_2N_filter, P_val_1N_2N_filter = scipy.stats.ttest_ind(MJ_filter_1N_to_2_comon,MJ_filter_2N_to_1_comon, axis=0, equal_var=True) degree_1N_2N_filter = (N_MJ_filter_1N_2N -1) *Level_of_confidence if degree_1N_2N_filter < abs(T_stat_1N_2N_filter): print('1N and 2N Filter rejects the null', T_stat_1N_2N_filter,'P-value', P_val_1N_2N_filter,'Sample size N', N_MJ_filter_1N_2N) else: print('1N and 2N Filter accepts the null', T_stat_1N_2N_filter,'P-value', P_val_1N_2N_filter,'Sample size N', N_MJ_filter_1N_2N) T_sign_1N_2N_filter, P_sign_1N_2N_filter = scipy.stats.wilcoxon(MJ_filter_1N_to_2_comon, MJ_filter_2N_to_1_comon) # 1n to 2n HOOOOOOD T_stat_1H_2H, P_val_1H_2H = scipy.stats.ttest_ind(MJ_Phase_1H_to_2_comon,MJ_Phase_2H_to_1_comon, axis=0, equal_var=True) degree_1H_2H = (N_MJ_Phase_1H_2H -1) *Level_of_confidence if degree_1H_2H < abs(T_stat_1H_2H): print('1H and 2H Phase rejects the null', T_stat_1H_2H,'P-value', P_val_1H_2H,'Sample size N', N_MJ_Phase_1H_2H) else: print('1H and 2H Phase accepts the null', T_stat_1H_2H,'P-value', P_val_1H_2H,'Sample size N', N_MJ_Phase_1H_2H) T_sign_1H_2H, 
P_sign_1H_2H = scipy.stats.wilcoxon(MJ_Phase_1H_to_2_comon, MJ_Phase_2H_to_1_comon) T_stat_1H_2H_filter, P_val_1H_2H_filter = scipy.stats.ttest_ind(MJ_filter_1H_to_2_comon,MJ_filter_2H_to_1_comon, axis=0, equal_var=True) degree_1H_2H_filter = (N_MJ_filter_1H_2H -1) *Level_of_confidence if degree_1H_2H_filter < abs(T_stat_1H_2H_filter): print('1H and 2H Filter rejects the null', T_stat_1H_2H_filter,'P-value', P_val_1H_2H_filter,'Sample size N', N_MJ_filter_1H_2H) else: print('1H and 2H Filter accepts the null', T_stat_1H_2H_filter,'P-value', P_val_1H_2H_filter,'Sample size N', N_MJ_filter_1H_2H) T_sign_1H_2H_filter, P_sign_1H_2H_filter = scipy.stats.wilcoxon(MJ_filter_1H_to_2_comon, MJ_filter_2H_to_1_comon) T_stat_1N_3N, P_val_1N_3N = scipy.stats.ttest_ind(MJ_Phase_1N_to_3_comon,MJ_Phase_3N_to_1_comon, axis=0, equal_var=True) degree_1N_3N = (N_MJ_Phase_1N_3N -1) *Level_of_confidence if degree_1N_3N < abs(T_stat_1N_3N): print('1N and 3N Phase rejects the null', T_stat_1N_3N,'P-value', P_val_1N_3N,'Sample size N', N_MJ_Phase_1N_3N) else: print('1N and 3N Phase accepts the null', T_stat_1N_3N,'P-value', P_val_1N_3N,'Sample size N', N_MJ_Phase_1N_3N) T_sign_1N_3N, P_sign_1N_3N = scipy.stats.wilcoxon(MJ_Phase_1N_to_3_comon, MJ_Phase_3N_to_1_comon) T_stat_1N_3N_filter, P_val_1N_3N_filter = scipy.stats.ttest_ind(MJ_filter_1N_to_3_comon,MJ_filter_3N_to_1_comon, axis=0, equal_var=True) degree_1N_3N_filter = (N_MJ_filter_1N_3N -1) *Level_of_confidence if degree_1N_3N_filter < abs(T_stat_1N_3N_filter): print('1N and 3N Filter rejects the null', T_stat_1N_3N_filter,'P-value', P_val_1N_3N_filter,'Sample size N', N_MJ_filter_1N_3N) else: print('1N and 3N Filter accepts the null', T_stat_1N_3N_filter,'P-value', P_val_1N_3N_filter,'Sample size N', N_MJ_filter_1N_3N) T_sign_1N_3N_filter, P_sign_1N_3N_filter = scipy.stats.wilcoxon(MJ_filter_1N_to_3_comon, MJ_filter_3N_to_1_comon) # 1n to 3n HOOOOOOD T_stat_1H_3H, P_val_1H_3H = scipy.stats.ttest_ind(MJ_Phase_1H_to_3_comon,MJ_Phase_3H_to_1_comon, axis=0, equal_var=True) degree_1H_3H = (N_MJ_Phase_1H_3H -1) *Level_of_confidence if degree_1H_3H < abs(T_stat_1H_3H): print('1H and 3H Phase rejects the null', T_stat_1H_3H,'P-value', P_val_1H_3H,'Sample size N', N_MJ_Phase_1H_3H) else: print('1H and 3H Phase accepts the null', T_stat_1H_3H,'P-value', P_val_1H_3H,'Sample size N', N_MJ_Phase_1H_3H) T_sign_1H_3H, P_sign_1H_3H = scipy.stats.wilcoxon(MJ_Phase_1H_to_3_comon, MJ_Phase_3H_to_1_comon) T_stat_1H_3H_filter, P_val_1H_3H_filter = scipy.stats.ttest_ind(MJ_filter_1H_to_3_comon,MJ_filter_3H_to_1_comon, axis=0, equal_var=True) degree_1H_3H_filter = (N_MJ_filter_1H_3H -1) *Level_of_confidence if degree_1H_3H_filter < abs(T_stat_1H_3H_filter): print('1H and 3H Filter rejects the null', T_stat_1H_3H_filter,'P-value', P_val_1H_3H_filter,'Sample size N', N_MJ_filter_1H_3H) else: print('1H and 3H Filter accepts the null', T_stat_1H_3H_filter,'P-value', P_val_1H_3H_filter,'Sample size N', N_MJ_filter_1H_3H) T_sign_1H_3H_filter, P_sign_1H_3H_filter = scipy.stats.wilcoxon(MJ_filter_1H_to_3_comon, MJ_filter_3H_to_1_comon) T_stat_1N_4N, P_val_1N_4N = scipy.stats.ttest_ind(MJ_Phase_1N_to_4_comon,MJ_Phase_4N_to_1_comon, axis=0, equal_var=True) degree_1N_4N = (N_MJ_Phase_1N_4N -1) *Level_of_confidence if degree_1N_4N < abs(T_stat_1N_4N): print('1N and 4N Phase rejects the null', T_stat_1N_4N,'P-value', P_val_1N_4N,'Sample size N', N_MJ_Phase_1N_4N) else: print('1N and 4N Phase accepts the null', T_stat_1N_4N,'P-value', P_val_1N_4N,'Sample size N', N_MJ_Phase_1N_4N) T_sign_1N_4N, 
P_sign_1N_4N = scipy.stats.wilcoxon(MJ_Phase_1N_to_4_comon, MJ_Phase_4N_to_1_comon) T_stat_1N_4N_filter, P_val_1N_4N_filter = scipy.stats.ttest_ind(MJ_filter_1N_to_4_comon,MJ_filter_4N_to_1_comon, axis=0, equal_var=True) degree_1N_4N_filter = (N_MJ_filter_1N_4N -1) *Level_of_confidence if degree_1N_4N_filter < abs(T_stat_1N_4N_filter): print('1N and 4N Filter rejects the null', T_stat_1N_4N_filter,'P-value', P_val_1N_4N_filter,'Sample size N', N_MJ_filter_1N_4N) else: print('1N and 4N Filter accepts the null', T_stat_1N_4N_filter,'P-value', P_val_1N_4N_filter,'Sample size N', N_MJ_filter_1N_4N) T_sign_1N_4N_filter, P_sign_1N_4N_filter = scipy.stats.wilcoxon(MJ_filter_1N_to_4_comon, MJ_filter_4N_to_1_comon) T_stat_2N_3N, P_val_2N_3N = scipy.stats.ttest_ind(MJ_Phase_2N_to_3_comon,MJ_Phase_3N_to_2_comon, axis=0, equal_var=True) degree_2N_3N = (N_MJ_Phase_2N_3N -1) *Level_of_confidence if degree_2N_3N < abs(T_stat_2N_3N): print('2N and 3N Phase rejects the null', T_stat_2N_3N,'P-value', P_val_2N_3N,'Sample size N', N_MJ_Phase_2N_3N) else: print('2N and 3N Phase accepts the null', T_stat_2N_3N,'P-value', P_val_2N_3N,'Sample size N', N_MJ_Phase_2N_3N) T_sign_2N_3N, P_sign_2N_3N = scipy.stats.wilcoxon(MJ_Phase_2N_to_3_comon, MJ_Phase_3N_to_2_comon) T_stat_2N_3N_filter, P_val_2N_3N_filter = scipy.stats.ttest_ind(MJ_filter_2N_to_3_comon,MJ_filter_3N_to_2_comon, axis=0, equal_var=True) degree_2N_3N_filter = (N_MJ_filter_2N_3N -1) *Level_of_confidence if degree_2N_3N_filter < abs(T_stat_2N_3N_filter): print('2N and 3N Filter rejects the null', T_stat_2N_3N_filter,'P-value', P_val_2N_3N_filter,'Sample size N', N_MJ_filter_2N_3N) else: print('2N and 3N Filter accepts the null', T_stat_2N_3N_filter,'P-value', P_val_2N_3N_filter,'Sample size N', N_MJ_filter_2N_3N) T_sign_2N_3N_filter, P_sign_2N_3N_filter = scipy.stats.wilcoxon(MJ_filter_2N_to_3_comon, MJ_filter_3N_to_2_comon) # 2n to 3n HOOOOOOD T_stat_2H_3H, P_val_2H_3H = scipy.stats.ttest_ind(MJ_Phase_2H_to_3_comon,MJ_Phase_3H_to_2_comon, axis=0, equal_var=True) degree_2H_3H = (N_MJ_Phase_2H_3H -1) *Level_of_confidence if degree_2H_3H < abs(T_stat_2H_3H): print('2H and 3H Phase rejects the null', T_stat_2H_3H,'P-value', P_val_2H_3H,'Sample size N', N_MJ_Phase_2H_3H) else: print('2H and 3H Phase accepts the null', T_stat_2H_3H,'P-value', P_val_2H_3H,'Sample size N', N_MJ_Phase_2H_3H) T_sign_2H_3H, P_sign_2H_3H = scipy.stats.wilcoxon(MJ_Phase_2H_to_3_comon, MJ_Phase_3H_to_2_comon) T_stat_2H_3H_filter, P_val_2H_3H_filter = scipy.stats.ttest_ind(MJ_filter_2H_to_3_comon,MJ_filter_3H_to_2_comon, axis=0, equal_var=True) degree_2H_3H_filter = (N_MJ_filter_2H_3H -1) *Level_of_confidence if degree_2H_3H_filter < abs(T_stat_2H_3H_filter): print('2H and 3H Filter rejects the null', T_stat_2H_3H_filter,'P-value', P_val_2H_3H_filter,'Sample size N', N_MJ_filter_2H_3H) else: print('2H and 3H Filter accepts the null', T_stat_2H_3H_filter,'P-value', P_val_2H_3H_filter,'Sample size N', N_MJ_filter_2H_3H) T_sign_2H_3H_filter, P_sign_2H_3H_filter = scipy.stats.wilcoxon(MJ_filter_2H_to_3_comon, MJ_filter_3H_to_2_comon) #2N to 4N T_stat_2N_4N, P_val_2N_4N = scipy.stats.ttest_ind(MJ_Phase_2N_to_4_comon,MJ_Phase_4N_to_2_comon, axis=0, equal_var=True) degree_2N_4N = (N_MJ_Phase_2N_4N -1) *Level_of_confidence if degree_2N_4N < abs(T_stat_2N_4N): print('2N and 4N Phase rejects the null', T_stat_2N_4N,'P-value', P_val_2N_4N,'Sample size N', N_MJ_Phase_2N_4N) else: print('2N and 4N Phase accepts the null', T_stat_2N_4N,'P-value', P_val_2N_4N,'Sample size N', N_MJ_Phase_2N_4N) 
T_sign_2N_4N, P_sign_2N_4N = scipy.stats.wilcoxon(MJ_Phase_2N_to_4_comon, MJ_Phase_4N_to_2_comon) T_stat_2N_4N_filter, P_val_2N_4N_filter = scipy.stats.ttest_ind(MJ_filter_2N_to_4_comon,MJ_filter_4N_to_2_comon, axis=0, equal_var=True) degree_2N_4N_filter = (N_MJ_filter_2N_4N -1) *Level_of_confidence if degree_2N_4N_filter < abs(T_stat_2N_4N_filter): print('2N and 4N Filter rejects the null', T_stat_2N_4N_filter,'P-value', P_val_2N_4N_filter,'Sample size N', N_MJ_filter_2N_4N) else: print('2N and 4N Filter accepts the null', T_stat_2N_4N_filter,'P-value', P_val_2N_4N_filter,'Sample size N', N_MJ_filter_2N_4N) T_sign_2N_4N_filter, P_sign_2N_4N_filter = scipy.stats.wilcoxon(MJ_filter_2N_to_4_comon, MJ_filter_4N_to_2_comon) #3N to 4N T_stat_3N_4N, P_val_3N_4N = scipy.stats.ttest_ind(MJ_Phase_3N_to_4_comon,MJ_Phase_4N_to_3_comon, axis=0, equal_var=True) degree_3N_4N = (N_MJ_Phase_3N_4N -1) *Level_of_confidence if degree_3N_4N < abs(T_stat_3N_4N): print('3N and 4N Phase rejects the null', T_stat_3N_4N,'P-value', P_val_3N_4N,'Sample size N', N_MJ_Phase_3N_4N) else: print('3N and 4N Phase accepts the null', T_stat_3N_4N,'P-value', P_val_3N_4N,'Sample size N', N_MJ_Phase_3N_4N) T_sign_3N_4N, P_sign_3N_4N = scipy.stats.wilcoxon(MJ_Phase_3N_to_4_comon, MJ_Phase_4N_to_3_comon) T_stat_3N_4N_filter, P_val_3N_4N_filter = scipy.stats.ttest_ind(MJ_filter_3N_to_4_comon,MJ_filter_4N_to_3_comon, axis=0, equal_var=True) degree_3N_4N_filter = (N_MJ_filter_3N_4N -1) *Level_of_confidence if degree_3N_4N_filter < abs(T_stat_3N_4N_filter): print('3N and 4N Filter rejects the null', T_stat_3N_4N_filter,'P-value', P_val_3N_4N_filter,'Sample size N', N_MJ_filter_3N_4N) else: print('3N and 4N Filter accepts the null', T_stat_3N_4N_filter,'P-value', P_val_3N_4N_filter,'Sample size N', N_MJ_filter_3N_4N) T_sign_3N_4N_filter, P_sign_3N_4N_filter = scipy.stats.wilcoxon(MJ_filter_3N_to_4_comon, MJ_filter_4N_to_3_comon) whole_t_stat = [T_stat_1N_2N, T_stat_1N_3N, T_stat_1N_4N, T_stat_2N_3N, T_stat_3N_4N,T_stat_2N_4N] whole_p_test = [P_val_1N_2N,P_val_1N_3N,P_val_1N_4N,P_val_2N_3N,P_val_3N_4N,P_val_2N_4N] Whole_sample = [N_MJ_Phase_1N_2N, N_MJ_Phase_1N_3N, N_MJ_Phase_1N_4N, N_MJ_Phase_2N_3N, N_MJ_Phase_3N_4N,N_MJ_Phase_2N_4N] Whole_degree = [degree_1N_2N, degree_1N_3N, degree_1N_4N, degree_2N_3N, degree_3N_4N, degree_2N_4N] Whole_sighn_t_stat = [T_sign_1N_2N,T_sign_1N_3N,T_sign_1N_4N,T_sign_2N_3N,T_sign_3N_4N,T_sign_2N_4N] Whole_sighn_p_test = [P_sign_1N_2N,P_sign_1N_3N,P_sign_1N_4N,P_sign_2N_3N,P_sign_3N_4N, P_sign_3N_4N] STD_1 = [np.std(MJ_Phase_1N_to_2_comon), np.std(MJ_Phase_1N_to_3_comon),np.std(MJ_Phase_1N_to_4_comon),np.std(MJ_Phase_2N_to_3_comon),np.std(MJ_Phase_3N_to_4_comon),np.std(MJ_Phase_2N_to_4_comon)] Median_1 = [stat.median(MJ_Phase_1N_to_2_comon), stat.median(MJ_Phase_1N_to_3_comon),stat.median(MJ_Phase_1N_to_4_comon),stat.median(MJ_Phase_2N_to_3_comon),stat.median(MJ_Phase_3N_to_4_comon), stat.median(MJ_Phase_2N_to_4_comon)] Mean_1 = [np.average(MJ_Phase_1N_to_2_comon),np.average(MJ_Phase_1N_to_3_comon),np.average(MJ_Phase_1N_to_4_comon),np.average(MJ_Phase_2N_to_3_comon),np.average(MJ_Phase_3N_to_4_comon),np.average(MJ_Phase_2N_to_4_comon)] STD_2 = [np.std(MJ_Phase_2N_to_1_comon), np.std(MJ_Phase_3N_to_1_comon),np.std(MJ_Phase_4N_to_1_comon),np.std(MJ_Phase_3N_to_2_comon),np.std(MJ_Phase_4N_to_3_comon),np.std(MJ_Phase_4N_to_2_comon)] Median_2 = [stat.median(MJ_Phase_2N_to_1_comon), 
stat.median(MJ_Phase_3N_to_1_comon),stat.median(MJ_Phase_4N_to_1_comon),stat.median(MJ_Phase_3N_to_2_comon),stat.median(MJ_Phase_4N_to_3_comon),stat.median(MJ_Phase_4N_to_2_comon)] Mean_2 = [np.average(MJ_Phase_2N_to_1_comon),np.average(MJ_Phase_3N_to_1_comon),np.average(MJ_Phase_4N_to_1_comon),np.average(MJ_Phase_3N_to_2_comon),np.average(MJ_Phase_4N_to_3_comon),np.average(MJ_Phase_4N_to_2_comon)] No_hood_percent_days_Filtered = [sum(Filter_1N_day_count)/sum(Phase_1N_day_count),sum(Filter_2N_day_count)/sum(Phase_2N_day_count),sum(Filter_3N_day_count)/sum(Phase_3N_day_count),sum(Filter_4N_day_count)/sum(Phase_4N_day_count) ] hood_percent_days_Filtered = [sum(Filter_1H_day_count)/sum(Phase_1H_day_count),sum(Filter_2H_day_count)/sum(Phase_2H_day_count),sum(Filter_3H_day_count)/sum(Phase_3H_day_count)] Hood_percentage = {'Phase':['1H','2H','3H'], 'Percentatges of hood filter':hood_percent_days_Filtered} No_Hood_percentage = {'Phase':['1N','2N','3N','4N'],'Percentatges of No hood filter':No_hood_percent_days_Filtered} df_percent_hood = pd.DataFrame(Hood_percentage) df_percent_No_hood = pd.DataFrame(No_Hood_percentage) whole_t_stat_H = [T_stat_1H_2H, T_stat_1H_3H, T_stat_2H_3H] whole_p_test_H = [P_val_1H_2H,P_val_1H_3H,P_val_2H_3H] Whole_sample_H = [N_MJ_Phase_1H_2H, N_MJ_Phase_1H_3H,N_MJ_Phase_2H_3H] Whole_degree_H = [degree_1H_2H, degree_1H_3H, degree_2H_3H] Whole_sighn_t_stat_H = [T_sign_1H_2H,T_sign_1H_3H,T_sign_2H_3H] Whole_sighn_p_test_H = [P_sign_1H_2H,P_sign_1H_3H,P_sign_2H_3H] STD_1_H = [np.std(MJ_Phase_1H_to_2_comon),
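# The blocks above repeat one pattern: keep only the household IDs shared by
# two phases, pair up their MJ values in matching order, then run an
# independent-samples t-test and a Wilcoxon signed-rank test on the paired
# lists. A sketch of that pattern as a reusable helper (function and variable
# names are illustrative, not from the original script):

import scipy.stats


def compare_phases(ids_a, vals_a, ids_b, vals_b, label=""):
    """Pair values whose IDs occur in both phases, then test the difference."""
    common_a, common_b = [], []
    for i, hh_a in enumerate(ids_a):
        if hh_a == str(-1):  # sentinel marking the end of valid rows
            break
        for j, hh_b in enumerate(ids_b):
            if hh_a == hh_b:
                common_a.append(vals_a[i])
                common_b.append(vals_b[j])
    t_stat, p_val = scipy.stats.ttest_ind(common_a, common_b, equal_var=True)
    w_stat, w_p = scipy.stats.wilcoxon(common_a, common_b)
    print(label, 'T-stat', t_stat, 'P-value', p_val,
          'Wilcoxon P-value', w_p, 'Sample size N', len(common_a))
    return t_stat, p_val, w_stat, w_p


# e.g. compare_phases(HH_1N, Mj_1N_Phase, HH_2N, Mj_2N_Phase, label='1N vs 2N')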
""" motifscan.cli.main ------------------ Main command line interface of MotifScan. """ import argparse import os import sys from textwrap import dedent from motifscan import __version__ from motifscan.cli import config, genome, motif, scan from motifscan.config import user_rc_path from motifscan.logging import setup_logger from motifscan.region import REGION_FORMATS def _exit(status=0, message=None): if message: print(message, file=sys.stderr) sys.exit(status) def _pos_int(value): """Check whether a passed argument is a positive integer.""" try: value_int = int(value) if value_int <= 0: raise ValueError except (ValueError, TypeError): raise argparse.ArgumentTypeError( f"invalid positive int value: {value!r}") return value_int def _non_negative_int(value): """Check whether a passed argument is a non-negative integer.""" try: value_int = int(value) if value_int < 0: raise ValueError except (ValueError, TypeError): raise argparse.ArgumentTypeError( f"invalid non-negative int value: {value!r}") return value_int def _add_verbose_argument(parser): parser.add_argument( "--verbose", dest="verbose", action="store_true", default=False, help="Enable verbose log messages.") return parser def configure_parser_main(): """Configure the arguments parsers for MotifScan.""" description = dedent(""" MotifScan: A motif discovery tool to detect the occurrences of known motifs Given a set of input genomic regions, MotifScan scans the sequences to detect the occurrences of known motifs. It can also perform an enrichment analysis to check whether these motifs are over/under-represented compared to the control regions. !!! NOTE !!! MotifScan requires basic data files including genome sequences and motif PFMs (Position Frequency Matrices) to detect the binding sites of motifs. Before scanning, users should install genome assemblies and motif sets from a remote database or with local prepared files via `motifscan genome` and `motifscan motif` subcommands. Citation: <NAME>., <NAME>., <NAME>. et al. Quantitative integration of epigenomic variation and transcription factor binding using MAmotif toolkit identifies an important role of IRF2 as transcription activator at gene promoters. Cell Discov 4, 38 (2018). https://doi.org/10.1038/s41421-018-0045-y """) epilog_msg = dedent(""" Please run `motifscan COMMAND -h` to see the subcommand options. See also: Documentation: https://motifscan.readthedocs.io Source code: https://github.com/shao-lab/MotifScan Bug reports: https://github.com/shao-lab/MotifScan/issues """) parser = argparse.ArgumentParser( description=description, epilog=epilog_msg, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument("-v", "--version", action="version", version=f"MotifScan {__version__}") subparsers = parser.add_subparsers(title="MotifScan Subcommands", metavar="command", dest="cmd") configure_parser_config(subparsers) configure_parser_genome(subparsers) configure_parser_motif(subparsers) configure_parser_scan(subparsers) return parser def configure_parser_config(subparsers): """Configure the arguments parsers for 'config' subcommand.""" help_msg = "Configure data paths for MotifScan." desc_msg = help_msg + dedent(f""" Commands listed below enable users to change the default installation location of genome/motif data files and check the paths of installed genome assemblies or motif sets. 
The user specific config file is located at: {user_rc_path} """) epilog_msg = dedent(""" Examples: --------- 1) Display all values set in the config file: motifscan config --show 2) Change the default installation location for genome assemblies: motifscan config --set-default-genome <path> 3) Change the default installation location for motif sets: motifscan config --set-default-motif <path> 4) Get the genome path of a specific genome assembly: motifscan config --get-genome <genome_name> 5) Change the motif path for a specific motif set: motifscan config --set-motif <motif_set> <path> """) parser = subparsers.add_parser( "config", description=desc_msg, help=help_msg, epilog=epilog_msg, formatter_class=argparse.RawDescriptionHelpFormatter) parser_basic = parser.add_argument_group("Basic Options") parser_basic.add_argument( "--show", dest="show", action="store_true", default=False, help="Show all configured values.") parser_default = parser.add_argument_group("Default Install Location") parser_default.add_argument( "--set-default-genome", metavar="PATH", dest="set_default_genome", help="Set the default installation path for genome assemblies.") parser_default.add_argument( "--set-default-motif", metavar="PATH", dest="set_default_motif", help="Set the default installation path for motif sets.") parser_genome = parser.add_argument_group("Genome Path Options") parser_genome.add_argument( "--get-genome", metavar="NAME", dest="get_genome", help="Get the genome path of a specific genome assembly.") parser_genome.add_argument( "--set-genome", metavar=("NAME", "PATH"), dest="set_genome", nargs=2, help="Set the genome path for a specific genome assembly.") parser_genome.add_argument( "--rm-genome", metavar="NAME", dest="rm_genome", help="Remove a specific genome assembly.") parser_motif = parser.add_argument_group("Motif Path Options") parser_motif.add_argument( "--get-motif", metavar="NAME", dest="get_motif", help="Get the motif path of a specific motif set.") parser_motif.add_argument( "--set-motif", metavar=("NAME", "PATH"), dest="set_motif", nargs=2, help="Set the motif path for a specific motif set.") parser_motif.add_argument( "--rm-motif", metavar="NAME", dest="rm_motif", help="Remove a specific motif set.") parser = _add_verbose_argument(parser) parser.set_defaults(func=config.run) def configure_parser_genome(subparsers): """Configure the arguments parsers for the 'genome' subcommand.""" help_msg = "Genome assembly commands." desc_msg = help_msg + dedent(""" This subcommand controls the genome assemblies used by MotifScan. MotifScan requires a sequences FASTA file and a gene annotation file (if available) for each genome assembly, users can either download them from a remote database or install directly with local prepared files. """) epilog_msg = dedent(""" Examples: --------- 1) Display installed genomes: motifscan genome --list 2) Display all available genomes in a remote database: motifscan genome --list-remote 3) Search genomes in a remote database by keyword (e.g. 'human'): motifscan genome --search human 4) Install 'hg19' genome assembly from a remote database: motifscan genome --install -n hg19 -r hg19 5) Install 'hg19' genome assembly with local prepared files: motifscan genome --install -n hg19 -i <hg19.fa> -a <refGene.txt> 6) Uninstall a genome assembly: motifscan genome --uninstall <genome_name> Notes: ------ The path of newly installed genome will be automatically saved. 
If you move the directory to another location later, please reconfigure it: motifscan config --set-genome <genome_name> <new_path> """) parser = subparsers.add_parser( "genome", description=desc_msg, help=help_msg, epilog=epilog_msg, formatter_class=argparse.RawDescriptionHelpFormatter) subcommands = parser.add_argument_group("Genome Subcommands") subcommands = subcommands.add_mutually_exclusive_group() subcommands.add_argument( "--list", dest="list", action="store_true", default=False, help="Display installed genome assemblies.") subcommands.add_argument( "--list-remote", dest="list_remote", action="store_true", default=False, help="Display available remote genome assemblies.") subcommands.add_argument( "--search", metavar="KEYWORD", dest="search", help="Search for genome assemblies in a remote database.") subcommands.add_argument( "--install", dest="install", action="store_true", default=False, help="Install a new genome assembly.") subcommands.add_argument( "--uninstall", metavar="NAME", dest="uninstall", help="Uninstall a genome assembly.") subcommands.required = True parser_install = parser.add_argument_group("Install Options") parser_install.add_argument( "-n", "--name", metavar="NAME", dest="name", help="Name of the genome assembly to be installed.") parser_install.add_argument( "-i", metavar="FASTA", dest="fasta_files", nargs="+", help="Local genome sequences file(s) in FASTA format.") parser_install.add_argument( "-a", metavar="ANNOTATION", dest="gene_file", help="Local gene annotation (refGene.txt) file.") parser_install.add_argument( "-r", "--remote", metavar="GENOME", dest="remote", help="Download required data files from a remote assembly.") parser_install.add_argument( "-o", "--output-dir", metavar="DIR", dest="output_dir", help="Write to a given directory instead of the default directory.") parser_remote = parser.add_argument_group("Remote Database Options") parser_remote.add_argument( "--database", dest="database", choices=["ucsc"], default="ucsc", help="Which remote database is used to list/install/search genome " "assemblies. Default: ucsc") parser_remote.add_argument( "--clean", dest="clean", action="store_true", default=False, help="Clean the download directory after installation.") parser = _add_verbose_argument(parser) parser.set_defaults(func=genome.run) def _check_args_genome(args): """Check the arguments of the 'genome' subcommand.""" if args.install: # -n/--name must be specified if not args.name: _exit(1, "motifscan genome --install: error: argument -n/--name " "is required") # check conflict between local model and remote mode if args.remote and (args.fasta_files or args.gene_file): _exit(1, "motifscan genome --install: error: argument -r/--remote " "is not allowed with argument -i or -a") # -i/-a must be specified in local mode if not args.remote: if not args.fasta_files: _exit(1, "motifscan genome --install: error: argument -i is " "required") if not args.gene_file: _exit(1, "motifscan genome --install: error: argument -a is " "required") # check if the input files are existed input_files = list(args.fasta_files) input_files.append(args.gene_file) for path in input_files: if not os.path.isfile(path): _exit(1, f"motifscan genome --install: error: file not " f"found: {path}") def configure_parser_motif(subparsers): """Configure the arguments parsers for the 'motif' subcommand.""" help_msg = "Motif set (PFMs/PWMs) commands." desc_msg = help_msg + dedent(""" MotifScan only detects the binding sites of known motifs. 
Before scanning, the motif set should be installed and built with PFMs (Position Frequency Matrices). Since different assemblies have different genome contents, it is necessary to build the PFMs and get proper motif score cutoffs for every genome assembly you want to scan later. """) epilog_msg = dedent(""" Examples: --------- 1) Display installed motif sets: motifscan motif --list 2) Display all available motif sets in a remote database: motifscan motif --list-remote 3) Install a motif set from a remote database and build for genome 'hg19': motifscan motif --install -n <motif_set> -r <remote_PFMs> -g hg19 4) Install a motif set with local PFMs file(s) and build for genome 'mm9': motifscan motif --install -n <motif_set> -i <pfms.jaspar> -g mm9 5) Build an installed motif set (PFMs) for additional assembly 'hg38': motifscan motif --build <motif_set> -g hg38 6) Uninstall a motif set: motifscan motif --uninstall <motif_set> Notes: ------ 1) When installing a motif set by `--install`, you can
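The motifscan CLI above builds every subcommand the same way: an argparse subparser, a required mutually exclusive group for the action flags, and separate argument groups for the remaining options. Below is a minimal sketch of that pattern, with illustrative names that are not part of motifscan.

# Minimal sketch of the subparser pattern used above (illustrative names,
# not part of motifscan): one subcommand with a required, mutually
# exclusive action group plus a separate options group.
import argparse
from textwrap import dedent


def build_parser():
    parser = argparse.ArgumentParser(prog="toolkit")
    subparsers = parser.add_subparsers(dest="command")

    genome = subparsers.add_parser(
        "genome",
        description=dedent("""\
            Genome assembly commands."""),
        formatter_class=argparse.RawDescriptionHelpFormatter)

    # Required, mutually exclusive actions (mirrors --list/--install/...).
    actions = genome.add_argument_group("Subcommands")
    actions = actions.add_mutually_exclusive_group(required=True)
    actions.add_argument("--list", action="store_true", default=False)
    actions.add_argument("--install", action="store_true", default=False)

    # Separate group for install-time options (mirrors -n/-r above).
    install_opts = genome.add_argument_group("Install Options")
    install_opts.add_argument("-n", "--name", metavar="NAME")
    install_opts.add_argument("-r", "--remote", metavar="GENOME")
    return parser


if __name__ == "__main__":
    args = build_parser().parse_args(["genome", "--install", "-n", "hg19"])
    print(args.command, args.install, args.name)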
<gh_stars>0 import numpy as np import matplotlib import matplotlib.pyplot as plt from danpy.sb import dsb from danpy.useful_functions import save_figures,is_number from scipy import signal import numdifftools as nd import scipy as sp from params import * # from animate import * def LP_filt(filter_length, x): """ Finite Impulse Response (FIR) Moving Average (MA) Low-Pass Filter """ b=np.ones(filter_length,)/(filter_length) #Finite Impulse Response (FIR) Moving Average (MA) filter with one second filter length a=1 y = signal.filtfilt(b, a, x) return y class plant_pendulum_1DOF2DOF: def __init__(self,**params): self.Ij = params.get("Joint Inertia", 1.15e-2) # kg⋅m² is_number(self.Ij,"Joint Inertia",default=1.15e-2) self.bj = params.get("Joint Damping", 0.001) # N⋅s⋅m⁻¹ is_number(self.bj,"Joint Damping",default=0.001) self.mj = params.get("Joint Mass", 0.541) # kg is_number(self.mj,"Joint Mass",default=0.541) self.rj = params.get("Joint Moment Arm", 0.05) # m is_number(self.rj,"Joint Moment Arm",default=0.05) self.Lcm = params.get("Link Center of Mass", 0.085) # m is_number(self.Lcm,"Link Center of Mass",default=0.085) self.L = params.get("Link Length", 0.3) # m is_number(self.L,"Link Length",default=0.3) self.Jm = params.get("Motor Inertia", 6.6e-5) # kg⋅m² is_number(self.Jm,"Motor Inertia",default=6.6e-5) self.bm = params.get("Motor Damping", 0.00462) # N⋅s⋅m⁻¹ is_number(self.bm,"Motor Damping",default=0.00462) self.rm = params.get("Motor Moment Arm", 0.01) # m is_number(self.rm,"Motor Moment Arm",default=0.01) self.k_spr = params.get("Spring Stiffness Coefficient",1) # N is_number(self.k_spr,"",default=1) self.b_spr = params.get("Spring Shape Coefficient",100) # unit-less is_number(self.b_spr,"",default=1) self.simulationDuration = params.get("Simulation Duration", 1000) is_number(self.simulationDuration,"Simulation Duration") self.dt = params.get("dt", 0.01) is_number(self.dt,"dt") self.k0 = params.get( "Position Gains", { 0 : 3162.3, 1 : 1101.9, 2 : 192.0, 3 : 19.6 } ) self.ks = params.get( "Stiffness Gains", { 0 : 316.2, 1 : 25.1 } ) self.Lf4h0_list = [] self.Lf2hs_list = [] self.df2dx1_list = [] self.df2dx2_list = [] self.df2dx3_list = [] self.df2dx5_list = [] self.vs_list = [] def C(self,X): """ Returns zero until the effects are quantified """ return( 0 ) def dCdx1(self,X): return(0) def d2Cdx12(self,X): return(0) def d2Cdx1x2(self,X): return(0) def dCdx2(self,X): return(0) def d2Cdx22(self,X): return(0) def update_state_variables(self,X): #>>>> State functions self.f1 = self.f1_func(X) self.f2 = self.f2_func(X) self.f3 = self.f3_func(X) self.f4 = self.f4_func(X) self.f5 = self.f5_func(X) self.f6 = self.f6_func(X) #>>>> State functions first gradient # self.df1dx1 = 0 self.df1dx2 = 1 # self.df1dx3 = 0 # self.df1dx4 = 0 # self.df1dx5 = 0 # self.df1dx6 = 0 self.df2dx1 = self.df2dx1_func(X) self.df2dx1_list.append(self.df2dx1) self.df2dx2 = self.df2dx2_func(X) self.df2dx2_list.append(self.df2dx2) self.df2dx3 = self.df2dx3_func(X) self.df2dx3_list.append(self.df2dx3) # self.df2dx4 = 0 self.df2dx5 = self.df2dx5_func(X) self.df2dx5_list.append(self.df2dx5) # self.df2dx6 = 0 # self.df3dx1 = 0 # self.df3dx2 = 0 # self.df3dx3 = 0 self.df3dx4 = 1 # self.df3dx5 = 0 # self.df3dx6 = 0 # self.df4dx1 = N/A # self.df4dx2 = N/A # self.df4dx3 = N/A # self.df4dx4 = N/A # self.df4dx5 = N/A # self.df4dx6 = N/A # self.df5dx1 = 0 # self.df5dx2 = 0 # self.df5dx3 = 0 # self.df5dx4 = 0 # self.df5dx5 = 0 self.df5dx6 = 1 # self.df6dx1 = N/A # self.df6dx2 = N/A # self.df6dx3 = N/A # self.df6dx4 = N/A # 
self.df6dx5 = N/A # self.df6dx6 = N/A #>>>> State functions second gradient self.d2f2dx12 = self.d2f2dx12_func(X) self.d2f2dx1x2 = self.d2f2dx1x2_func(X) self.d2f2dx1x3 = self.d2f2dx1x3_func(X) self.d2f2dx1x5 = self.d2f2dx1x5_func(X) self.d2f2dx22 = self.d2f2dx22_func(X) self.d2f2dx32 = self.d2f2dx32_func(X) self.d2f2dx52 = self.d2f2dx52_func(X) # def motor_coupling_function(self,X,motorNumber): # return( # self.rm*self.k_spr*( # np.exp( # self.b_spr*( # self.rm*X[2+2*(motorNumber-1)] # + ((1.5-motorNumber)/0.5)*self.rj*X[0] # ) # ) # -1 # ) # ) def tendon_1_FL_func(self,X): return( self.k_spr*( np.exp(self.b_spr*(self.rm*X[2]-self.rj*X[0])) - 1 ) ) def tendon_2_FL_func(self,X): return( self.k_spr*( np.exp(self.b_spr*(self.rm*X[4]+self.rj*X[0])) - 1 ) ) def f1_func(self,X): return(X[1]) def f2_func(self,X): return( ( -self.C(X) # Coriolis and centrifugal torques (zero) - self.bj*X[1] # damping torque - self.Lcm*self.mj*gr*np.sin(X[0]) # gravitational torque + self.rj*self.k_spr * ( np.exp(self.b_spr*(self.rm*X[2] - self.rj*X[0])) - np.exp(self.b_spr*(self.rm*X[4] + self.rj*X[0])) ) # total coupling torque between motors and joint )/self.Ij ) def df2dx1_func(self,X): result = ( ( -self.dCdx1(X) # Coriolis and centrifugal torques (zero) - self.Lcm*self.mj*gr*np.cos(X[0]) # gravitational torque - (self.rj**2)*self.k_spr*self.b_spr * ( np.exp(self.b_spr*(self.rm*X[2] - self.rj*X[0])) + np.exp(self.b_spr*(self.rm*X[4] + self.rj*X[0])) ) # total coupling torque between motors and joint )/self.Ij ) return(result) def d2f2dx12_func(self,X): return( ( -self.d2Cdx12(X) # Coriolis and centrifugal torques (zero) + self.Lcm*self.mj*gr*np.sin(X[0]) # gravitational torque + (self.rj**3)*self.k_spr*(self.b_spr**2) * ( np.exp(self.b_spr*(self.rm*X[2] - self.rj*X[0])) - np.exp(self.b_spr*(self.rm*X[4] + self.rj*X[0])) ) # total coupling torque between motors and joint )/self.Ij ) def d2f2dx1x2_func(self,X): return( ( -self.d2Cdx1x2(X) # Coriolis and centrifugal torques (zero) )/self.Ij ) def d2f2dx1x3_func(self,X): """ This is equivalently -dSda/Ij """ return( -(self.rj**2)*self.rm*self.k_spr*(self.b_spr**2) * ( np.exp(self.b_spr*(self.rm*X[2] - self.rj*X[0])) ) / self.Ij ) def d2f2dx1x5_func(self,X): """ This is equivalently dSdb/Ij """ return( -(self.rj**2)*self.rm*self.k_spr*(self.b_spr**2) * ( np.exp(self.b_spr*(self.rm*X[4] + self.rj*X[0])) ) / self.Ij ) def df2dx2_func(self,X): result = ( ( -self.dCdx2(X) # Coriolis and centrifugal torques (zero) - self.bj # damping torque )/self.Ij ) return(result) def d2f2dx22_func(self,X): return( ( -self.d2Cdx22(X) # Coriolis and centrifugal torques (zero) )/self.Ij ) def df2dx3_func(self,X): """ Equivalently, this is the negative value of -Q_{11}/Ij """ result = ( self.rj*self.rm*self.k_spr*self.b_spr * ( np.exp(self.b_spr*(self.rm*X[2] - self.rj*X[0])) ) / self.Ij ) return(result) def d2f2dx32_func(self,X): return( self.rj*(self.rm**2)*self.k_spr*(self.b_spr**2) * ( np.exp(self.b_spr*(self.rm*X[2] - self.rj*X[0])) ) / self.Ij ) def df2dx5_func(self,X): """ Equivalently, this is the negative value of -Q_{12}/Ij """ result = ( -self.rj*self.rm*self.k_spr*self.b_spr * ( np.exp(self.b_spr*(self.rm*X[4] + self.rj*X[0])) ) / self.Ij ) return(result) def d2f2dx52_func(self,X): return( -self.rj*(self.rm**2)*self.k_spr*(self.b_spr**2) * ( np.exp(self.b_spr*(self.rm*X[4] + self.rj*X[0])) ) / self.Ij ) def f3_func(self,X): return(X[3]) def f4_func(self,X): return( ( -self.bm*X[3] - self.rm*self.k_spr*( np.exp(self.b_spr*(self.rm*X[2] - self.rj*X[0])) -1 ) )/self.Jm ) def 
f5_func(self,X): return(X[5]) def f6_func(self,X): return( ( -self.bm*X[5] - self.rm*self.k_spr*( np.exp(self.b_spr*(self.rm*X[4] + self.rj*X[0])) -1 ) )/self.Jm ) def f(self,X): result = np.zeros((6,1)) result[0,0] = self.f1 result[1,0] = self.f2 result[2,0] = self.f3 result[3,0] = self.f4 result[4,0] = self.f5 result[5,0] = self.f6 return(result) def g(self,X): result = np.matrix(np.zeros((6,2))) result[3,0] = 1/self.Jm result[5,1] = 1/self.Jm return(result) def h(self,X): result = np.zeros((2,)) result[0] = X[0] result[1] = (self.rj**2)*self.k_spr*self.b_spr*( np.exp(self.b_spr*(self.rm*X[2] - self.rj*X[0])) + np.exp(self.b_spr*(self.rm*X[4] + self.rj*X[0])) ) return(result) def forward_simulation(self,Time,X_o,U=None): """ Building our own f_array to reduce the number of calls for f_funcs by making it a static call for each iteration in the FBL instance. """ assert len(X_o)==6, "X_o must have 6 elements, not " + str(len(X_o)) + "." dt = Time[1]-Time[0] if U is None: U = np.zeros((2,len(Time)-1)) else: assert np.shape(U)==(2,len(Time)-1), "U must be either None (default) of have shape (2,len(Time)-1), not " + str(np.shape(U)) + "." X = np.zeros((6,len(Time))) Y = np.zeros((2,len(Time))) X[:,0] = X_o Y[:,0] = self.h(X[:,0]) statusbar=dsb(0,len(Time)-1,title="Forward Simulation (Custom)") for i in range(len(Time)-1): f_array = np.zeros((6,1)) f_array[0,0] = self.f1_func(X[:,i]) f_array[1,0] = self.f2_func(X[:,i]) f_array[2,0] = self.f3_func(X[:,i]) f_array[3,0] = self.f4_func(X[:,i]) f_array[4,0] = self.f5_func(X[:,i]) f_array[5,0] = self.f6_func(X[:,i]) X[:,i+1] = ( X[:,i] + dt*( f_array + self.g(X[:,i])@U[:,np.newaxis,i] ).T ) Y[:,i+1] = self.h(X[:,i+1]) # self.update_state_variables(X[:,i+1]) statusbar.update(i) return(X,U,Y) def h0(self,X): return(X[0]) def Lfh0(self,X): return(X[1]) def Lf2h0(self,X): return(self.f2) def Lf3h0(self,X): result = ( self.df2dx1*self.f1 + self.df2dx2*self.f2 + self.df2dx3*self.f3 + self.df2dx5*self.f5 ) return(result) def Lf4h0(self,X): return( ( self.d2f2dx12*self.f1 + self.d2f2dx1x2*self.f2 + self.df2dx2*self.df2dx1 + self.d2f2dx1x3*self.f3 + self.d2f2dx1x5*self.f5 ) * self.f1 + ( self.d2f2dx1x2*self.f1 + self.df2dx1 + self.d2f2dx22*self.f2 + (self.df2dx2**2) ) * self.f2 + ( self.d2f2dx1x3*self.f1 + self.df2dx2*self.df2dx3 + self.d2f2dx32*self.f3 ) * self.f3 + ( self.df2dx3 ) * self.f4 + ( self.d2f2dx1x5*self.f1 + self.df2dx2*self.df2dx5 + self.d2f2dx52*self.f5 ) * self.f5 + ( self.df2dx5 ) * self.f6 ) def hs(self,X): return( (self.rj**2)*self.k_spr*self.b_spr*( np.exp(self.b_spr*(self.rm*X[2] - self.rj*X[0])) + np.exp(self.b_spr*(self.rm*X[4] + self.rj*X[0])) ) ) def Lfhs(self,X): return( (self.rj**2)*self.k_spr*(self.b_spr**2)*( -(self.rj*self.f1 - self.rm*self.f3)*( np.exp(self.b_spr*(self.rm*X[2] - self.rj*X[0])) ) + (self.rj*self.f1 + self.rm*self.f5)*( np.exp(self.b_spr*(self.rm*X[4] + self.rj*X[0])) ) ) ) def Lf2hs(self,X): return( (self.rj**2)*self.k_spr*(self.b_spr**2)*( ( self.b_spr*(self.rj*self.f1 - self.rm*self.f3)**2 - self.rj*self.f2 + self.rm*self.f4 ) * np.exp(self.b_spr*(self.rm*X[2] - self.rj*X[0])) + ( self.b_spr*(self.rj*self.f1 + self.rm*self.f5)**2 + self.rj*self.f2 + self.rm*self.f6 ) * np.exp(self.b_spr*(self.rm*X[4] + self.rj*X[0])) ) ) # def Phi(self,X): # return( # np.matrix([[ # self.h0(X), # self.Lfh0(X), # self.Lf2h0(X), # self.Lf3h0(X), # self.hs(X), # self.Lfhs(X) # ]]).T # ) def v0(self,X,x1d): result = ( x1d[4] + self.k0[3]*(x1d[3]-self.Lf3h0(X)) + self.k0[2]*(x1d[2]-self.Lf2h0(X)) + self.k0[1]*(x1d[1]-self.Lfh0(X)) + 
self.k0[0]*(x1d[0]-self.h0(X)) ) return(result) def vs(self,X,Sd): result =( Sd[2] + self.ks[1]*(Sd[1]-self.Lfhs(X)) + self.ks[0]*(Sd[0]-self.hs(X)) ) return(result) def Q(self,X): B = np.matrix([ [1/(self.Jm*self.Ij),0], [0,1/self.Jm] ]) W = self.rj*self.rm*self.k_spr*self.b_spr*np.matrix([ [ np.exp(self.b_spr*(self.rm*X[2] - self.rj*X[0])), -np.exp(self.b_spr*(self.rm*X[4] + self.rj*X[0])) ], [ self.rj*self.b_spr*( np.exp(self.b_spr*(self.rm*X[2] - self.rj*X[0])) ), self.rj*self.b_spr*( np.exp(self.b_spr*(self.rm*X[4] + self.rj*X[0])) ) ] ]) return(B*W) def return_input(self,X,x1d,Sd): try: Q_inv = self.Q(X)**(-1) except: import ipdb; ipdb.set_trace() return( Q_inv * ( np.matrix([[-self.Lf4h0(X),-self.Lf2hs(X)]]).T + np.matrix([[self.v0(X,x1d),self.vs(X,Sd)]]).T ) ) def forward_simulation_FL(self,Time,X_o,X1d,Sd): assert len(X_o)==6, "X_o must have 6 elements, not " +
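The core of the plant model above is the joint acceleration f2: viscous damping, gravity acting on the link's centre of mass, and an exponential tendon coupling torque from the two motors. Here is a standalone numerical sketch of that torque balance, assuming gr imported from params.py is standard gravity (9.81 m/s^2) and using the class's default parameter values.

# Standalone sketch of the joint acceleration f2 used by
# plant_pendulum_1DOF2DOF (assumes gr from params.py is 9.81 m/s^2;
# parameter values are the class defaults shown above).
import numpy as np

gr = 9.81                      # assumed gravitational acceleration, m/s^2
Ij, bj, mj = 1.15e-2, 0.001, 0.541
rj, rm, Lcm = 0.05, 0.01, 0.085
k_spr, b_spr = 1.0, 100.0


def tendon_force(motor_angle, joint_angle, sign):
    """Exponential tendon force-length model, F = k_spr*(exp(b_spr*stretch) - 1)."""
    return k_spr * (np.exp(b_spr * (rm * motor_angle + sign * rj * joint_angle)) - 1)


def joint_acceleration(X):
    """X = [theta, dtheta, a1, da1, a2, da2]; returns ddtheta (the f2 term)."""
    coupling = rj * (tendon_force(X[2], X[0], -1) - tendon_force(X[4], X[0], +1))
    return (-bj * X[1] - Lcm * mj * gr * np.sin(X[0]) + coupling) / Ij


print(joint_acceleration(np.zeros(6)))   # 0.0 at the hanging equilibrium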
<gh_stars>1-10 """ common ~~~~~~ Utilities and other commonly reused functions for pjinoise. """ from typing import Any, List, Mapping, Sequence, Tuple, Union import numpy as np from PIL import Image from pjinoise.constants import SUPPORTED_FORMATS, X, Y, Z # General purpose functions. def convert_color_space(a: np.ndarray, src_space: str = '', dst_space: str = 'RGB') -> np.ndarray: """Convert an array to the given color space. :param src_space: (Optional.) This is the identifier for the current color space of the image data. These identifiers are either an empty string to represent pjinoise grayscale or a color mode used by the pillow module (see below). :param dst_space: (Optional.) This is the identifier for the destination color space of the image data. These identifiers are either an empty string to represent pjinoise grayscale or a color mode used by the pillow module (see below). :return: :class:ndarray object. :rtype: numpy.ndarray Color Modes ----------- The color modes used by the pillow library can be found here: https://pillow.readthedocs.io/en/stable/handbook/ concepts.html#concept-modes """ # The shape of the output is based on the space, so we can't # build out until we do the first conversion. However, setting # it to None here makes the process of detecting whether we've # set up the output array a little smoother later. out = None # Most of pjinoise tries to work with grayscale color values # that go from zero to one. However, pillow's grayscale mode # is 'L', which represents the color as an unsigned 8 bit # integer. The data will need to at least be in mode 'L' for # pillow to be able to convert the color space. if src_space == '': assert np.max(a) <= 1.0 a = np.around(a * 0xff).astype(np.uint8) src_space = 'L' # The pjinoise grayscale isn't a mode that is recognized by # pillow, so we'll need pillow to convert it to its grayscale # first (mode 'L'). dst_is_pjinoise_grayscale = False if dst_space == '': dst_is_pjinoise_grayscale = True dst_space = 'L' # PIL.image.convert can only convert two-dimensional (or three, # with color channel being the third) images. So, for animations # we have to iterate through the Z axis, converting one frame at # a time. Since pjinoise thinks of still images as single frame # animations, this means we're always going to have to handle # the Z axis like this. for i in range(a.shape[Z]): img = Image.fromarray(a[i], mode=src_space) img = img.convert(dst_space) a_img = np.array(img) if out is None: out = np.zeros((a.shape[Z], *a_img.shape), dtype=np.uint8) out[i] = a_img # If we are converting to pjinoise grayscale, need to take the # eight-bit integers from pillow and turn them into the pjinoise # grayscale floats. if dst_is_pjinoise_grayscale: out = out.astype(float) / 0xff return out def grayscale_to_ints_list(a: np.ndarray, astype: type = np.uint8) -> List[int]: """pjinoise grayscale stores color values as floats between zero and one. This is a pain to read on a screen or type expected values for. This function converts that to lists of integers for easier test comparison and printing. 
""" a = a.copy() a = np.around(a * 0xff) a = a.astype(astype) return a.tolist() def get_format(filename: str) -> str: """Determine the image type based on the filename.""" name_part = filename.split('.')[-1] extension = name_part.casefold() try: return SUPPORTED_FORMATS[extension] except KeyError: print(f'The file type {name_part} is not supported.') supported = ', '.join(SUPPORTED_FORMATS) print(f'The supported formats are: {supported}.') raise SystemExit # Diagnostic output functions. def print_array(a: np.ndarray, depth: int = 0, color: bool = True) -> None: """Write the values of the given array to stdout.""" if len(a.shape) > 1: print(' ' * (4 * depth) + '[') for i in range(a.shape[0]): print_array(a[i], depth + 1, color) print(' ' * (4 * depth) + '],') else: if a.dtype != np.uint8 and color: a = (a.copy() * 0xff).astype(np.uint8) tmp = '0x{:02x}' else: tmp = '{}' nums = [tmp.format(n) for n in a] print(' ' * (4 * depth) + '[' + ', '.join(nums) + '],') def print_float_array(a: np.ndarray, depth: int = 0) -> None: """Write the values of the given array to stdout.""" if len(a.shape) > 1: print(' ' * (4 * depth) + '[') for i in range(a.shape[0]): print_float_array(a[i], depth + 1) print(' ' * (4 * depth) + '],') else: tmp = '{:05.2f}' nums = [tmp.format(n) for n in a] print(' ' * (4 * depth) + '[' + ', '.join(nums) + '],') def print_seq(seq: Sequence[Any], depth: int = 0) -> None: """Write the values of the given sequence to stdout.""" print_array(np.array(seq), depth, False) # Serialization/deserialization functions. def deserialize_sequence(value: Union[Sequence[float], str]) -> Tuple[float]: """Deserialize a set of coordinates that could have come from command line input. """ if not value: return (0, 0, 0) if isinstance(value, str): value = value.split(',')[::-1] return tuple(float(n) for n in value) def remove_private_attrs(mapping: Mapping) -> Mapping: """Remove the keys for private attributes from an object that has been serialized as an mapping. """ cls = type(mapping) public_keys = [key for key in mapping if not key.startswith('_')] dict_ = {key: mapping[key] for key in public_keys} return cls(dict_) def text_to_int(text: Union[bytes, str, int, None]) -> Union[int, None]: if isinstance(text, int) or text is None: return text if isinstance(text, str): text = bytes(text, 'utf_8') return int.from_bytes(text, 'little') # Interpolation functions. def lerp(a: np.ndarray, b: np.ndarray, x: np.ndarray) -> np.ndarray: """Perform a linear interpolation on the values of two arrays :param a: The "left" values. :param b: The "right" values. :param x: An array of how close the location of the final value should be to the "left" value. :return: A :class:ndarray object :rtype: numpy.ndarray Usage:: >>> import numpy as np >>> >>> a = np.array([1, 2, 3]) >>> b = np.array([3, 4, 5]) >>> x = np.array([.5, .5, .5]) >>> lerp(a, b, x) array([2., 3., 4.]) """ return a.astype(float) * (1 - x.astype(float)) + b.astype(float) * x def trilinear_interpolation(a: np.ndarray, factor: float) -> np.ndarray: """Resize an three dimensional array using trilinear interpolation. :param a: The array to resize. The array is expected to have at least three dimensions. :param factor: The amount to resize the array. Given how the interpolation works, you probably don't get great results with factor less than or equal to .5. Consider multiple passes of interpolation with larger factors in those cases. :return: A :class:ndarray object. :rtype: numpy.ndarray Usage:: >>> import numpy as np >>> >>> a = np.array([ ... [ ... 
[0, 1], ... [1, 0], ... ], ... [ ... [1, 0], ... [0, 1], ... ], ... ]) >>> trilinear_interpolation(a, 2) array([[[0. , 0.5, 1. , 1. ], [0.5, 0.5, 0.5, 0.5], [1. , 0.5, 0. , 0. ], [1. , 0.5, 0. , 0. ]], <BLANKLINE> [[0.5, 0.5, 0.5, 0.5], [0.5, 0.5, 0.5, 0.5], [0.5, 0.5, 0.5, 0.5], [0.5, 0.5, 0.5, 0.5]], <BLANKLINE> [[1. , 0.5, 0. , 0. ], [0.5, 0.5, 0.5, 0.5], [0. , 0.5, 1. , 1. ], [0. , 0.5, 1. , 1. ]], <BLANKLINE> [[1. , 0.5, 0. , 0. ], [0.5, 0.5, 0.5, 0.5], [0. , 0.5, 1. , 1. ], [0. , 0.5, 1. , 1. ]]]) """ # Return the array unchanged if the array won't be magnified. if factor == 1: return a # Perform a defensive copy of the original array to avoid # unexpected side effects. a = a.copy() # Since we are magnifying the given array, the new array's shape # will increase by the magnification factor. mag_size = tuple(int(s * factor) for s in a.shape) # Map out the relationship between the old space and the # new space. indices = np.indices(mag_size) if factor > 1: whole = (indices // factor).astype(int) parts = (indices / factor - whole).astype(float) else: new_ends = [s - 1 for s in mag_size] old_ends = [s - 1 for s in a.shape] true_factors = [n / o for n, o in zip(new_ends, old_ends)] for i in range(len(true_factors)): if true_factors[i] == 0: true_factors[i] = .5 whole = indices.copy() parts = indices.copy() for i in Z,
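A quick usage sketch for a few of the helpers above, assuming the module is importable as pjinoise.common (adjust the import path to the actual package layout).

# Usage sketch for helpers defined above (assumes the module is importable
# as pjinoise.common; adjust the import to your layout).
import numpy as np
from pjinoise.common import grayscale_to_ints_list, deserialize_sequence, lerp

# pjinoise grayscale floats in [0, 1] -> nested lists of 8-bit ints.
a = np.array([[0.0, 0.5, 1.0]])
print(grayscale_to_ints_list(a))          # [[0, 128, 255]]

# Command-line style "Z,Y,X" coordinates -> (x, y, z) floats.
print(deserialize_sequence('2,4,8'))      # (8.0, 4.0, 2.0)

# Linear interpolation halfway between two arrays.
print(lerp(np.array([0., 10.]), np.array([10., 20.]), np.array([.5, .5])))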
# -*- coding: utf-8 -*- import pandas as pd import os import configparser, ast import copy #Constants kbh="20836612225.1252" # Boltzmann constant divided by Planck constant, s^-1, string. kbev="8.617333262145E-5" # Boltzmann constant in eV·K−1, string. avogadro=6.02214199E23 # Avogadro's constant. def readconf(filename='./parameters.txt'): """This function reads the input parameters from a file Args: filename: Input file in Windows .ini format. Comments should be provided as "#" Returns: conf: Configuration data. """ conf=configparser.ConfigParser(inline_comment_prefixes=('#')) conf.read(filename) return conf def rxntime(conf) : """Subroutine that interpretes the time. Requires the ast package Args: conf: Configuration data. """ time1raw=conf['Reactor']['time1'] if time1raw.find(',') >0 : # If it is a list timel=True time1=ast.literal_eval(time1raw) else : # If it is an unique value timel=False time1=time1raw return time1, timel def read(filename='./itm.csv') : """This function reads a file containing information of species in gas, aqu(eous), or adsorbed on cat(alyst). It can also read the reactions file. . It requires pandas to be loaded. Args: filename: Input file. The columns are separated by one or more spaces. Energies must be provided in eV and frequencies in cm-1 Format: Returns: dicint: a dictionary containing at least the tags, energies, and frequencies of all species. """ dic=pd.read_csv(filename, delim_whitespace=True, index_col='label').T.to_dict() return dic def get_damptime(conf) : """Parse pressure damp from configuration file Args: conf: Configuration data. Returns: dampt1: Pressure damp in processing. dampt2: Pressure damp in post-processing. """ try : damptime=float(conf['Reactor']['damptime']) except : damptime=1.0 if damptime>1E-13 : dampt1="*(1-exp(-"+"{:.6E}".format(damptime)+"*t))^2" dampt2="*(1-exp(-"+"{:.6E}".format(damptime)+"*timei))^2" else : dampt1="" dampt2="" return dampt1, dampt2 def get_elecpot(conf) : """This function extracts the electric potential vs RHE from the configuration file. returns the electric potential vs SHE. """ try : elecpot=(float(conf['Electrochemistry']['electricpotentialrhe'])- float(conf['Electrochemistry']['pH'])*float(kbeV)* float(conf['Reactor']['reactortemp'])*ln(10.0) ) except : elecpot=0.0 return elecpot def get_nelect_for_itm(itm,item,label) : if item==None : nelect=0.0 else : try : nelect=float(itm[item][label]) except : nelect=0.0 print("Nasty problem found in ",item) return nelect def get_nelect_for_rxn(conf,itm,rxn) : """Get the number of electrons for a particular transition state from alpha values """ try : label=conf['Electrochemistry']['nelectronslabel'] except : label="ne" for item in sorted(rxn) : rxn[item][label]=((1-float(rxn[item][alpha]))*( get_nelect_for_itm(itm,rxn[item]['is1'],label)+ get_nelect_for_itm(itm,rxn[item]['is2'],label))+ float(rxn[item][alpha])*( get_nelect_for_itm(itm,rxn[item]['fs1'],label)+ get_nelect_for_itm(itm,rxn[item]['fs2'],label))) def adjust_energy_with_potential(conf,itm,elecpot) : """Adds the electric potential component to the Gibbs energy. 
""" try : label=conf['Electrochemistry']['nelectronslabel'] except : label="ne" for item in sorted(itm) : try : itm[item]['G']=float(itm[item]['G'])+float(itm[item][label])*elecpot except : print("Error found adjusting the potential of ", item) print(item) print(label) print(itm[item]['G']) print(float(itm[item]['G'])) print(float(itm[item][label])) exit() def process_intermediates(conf,itm,ltp) : """This function process the "intermediates" dataframe to generate the site-balance equation, the SODE-solver, and the initial conditions as clean surface. It also initializes the list of differential equations. Args: conf: Configuration data. itm: Dict of dicts containing at least a list of intermediates as index. Returns: itm: Expanded dict of dicts containing also the list of differential equations. (Mutable) sbalance: Site-balance equation. (Unmutable) sodesolv: Input of the SODE-solver. (Unmutable) initialc: Initial conditions as clean surface. (Unmutable) """ # Initialize variables related to intermediates. sbalance="c"+conf['Catalyst']['sitebalancespecies']+":=(t)-> 1.0" sodesolv="Solution:=dsolve({" initialc="IC0:=" rhsparse="" index=1 # Initialize list-to-print for postprocessing ltp['prs']=[] # ltp of pressures and concentrations-in-second-layer. ltp['itm']=["sc"+conf['Catalyst']['sitebalancespecies']] # ltp of interm.: init w/ s-b species #ltp['itm']=[conf['Catalyst']['sitebalancespecies']] # ltp of interm.: init w/ s-b species # Process intermediates, starting by adsorbed (cat), then gas. for item in sorted(itm) : # SERGIO: # for key,value in sorted(itm).items() : #key~item ; value~itM[item] (all line) # so the input of the sub-function will be the key and value if itm[item]['phase']=='cat' and item!=conf['Catalyst']['sitebalancespecies'] : # A surface species # Initialize diff equations to count in which reactions each species participate itm[item]['diff']="eqd"+item+":=diff(c"+item+"(t),t)=" #value['diff']="eqd"+key+":=diff(c"+key+"(t),t)=" # Prepare site balance sbalance+=" -c"+item+"(t)" # Prepare list of differential equations for the SODE solver sodesolv+="eqd"+item+", " # Prepare list of default initial conditions as clean surface initialc+=" c"+item+"(0.0)=0.0," # Prepare parser of concentrations after SODE is solved index+=1 # First element should be 1+1=2. Do not touch. rhsparse+="sc"+item+":=rhs(S["+str(index)+"]) : " # List of reactions for fprintf function in Maple ltp['itm'].append("sc"+item) elif itm[item]['phase']=='gas' : # Get partial pressures try : itm[item]['pressure']=conf['Pressures'][item] except : itm[item]['pressure']=0 # Generate list of pressures ltp['prs'].append("P"+item) elif itm[item]['phase']=='aqu' : # Get concentrations and convert to molecules/activesite. try : itm[item]['concentration']=(float(conf['Concentrations'][item])* float(conf['Catalyst']['areaactivesite'])* float(conf['Catalyst']['secondlayerthickness'])* avogadro*1E-27) except : itm[item]['concentration']=0.0 # Generate list-to-print of concentrations-in-the-second-layer; put along pressures. 
ltp['prs'].append("CSL"+item) elif item!=conf['Catalyst']['sitebalancespecies'] : print("Unknown phase for ",item,itm[item]['phase'], "\n I only recognize 'aqu', 'cat', and 'gas'") exit() # Close the site-balance equation sbalance=sbalance+" : " # Close the sodesolv sodesolv=sodesolv+"IC0}, numeric, method=rosenbrock, maxfun=0, abserr=1E-16, interr=false);" # In the initial conditions, replace the last comma by a colon initialc=initialc[:-1]+" : " return itm, sbalance, sodesolv, initialc, rhsparse def is_gas(itm,rxn,item,state) : """ Returns 1 if a given (initial/final) state of rxn #item is gas. Returns 0 otherwise. """ #print(item,state,rxn[item][state],itm['gP']['phase']) if rxn[item][state]=='None' or rxn[item][state]==None : gas=0 else : if itm[rxn[item][state]]['phase']=='gas' : gas=1 elif itm[rxn[item][state]]['phase']=='cat' or itm[rxn[item][state]]['phase']=='aqu' : gas=0 else : print("Phase of rxn#",item," intermediate ",rxn[item][state],":", itm[rxn[item][state]]['phase'],"Not recognized" ) return gas def mw_gas(itm,rxn,item,state) : """ Returns the mass weight of a gas-phase intermediate. Zero if adsorbed """ if rxn[item][state]=='None' or rxn[item][state]==None : mw=0 else : if itm[rxn[item][state]]['phase']=='gas' : mw=float(itm[rxn[item][state]]['mw']) elif itm[rxn[item][state]]['phase']=='cat' : mw=0 elif itm[rxn[item][state]]['phase']=='aqu' : mw=0 else : print("Phase of rxn#",item," intermediate ",rxn[item][state],":", itm[rxn[item][state]]['phase'],"Not recognized" ) return mw def kinetic_constants(conf,itm,rxn,item) : """ Prepares the kinetic constants for direct and (i)reverse semireactions depending on the number of gas-phase intermediates. Returns error if there are more than two species in gas for a given semirxn. """ rxn[item]['kd']="" rxn[item]['ki']="" howmanygasd=is_gas(itm,rxn,item,'is1')+is_gas(itm,rxn,item,'is2') howmanygasi=is_gas(itm,rxn,item,'fs1')+is_gas(itm,rxn,item,'fs2') area="{:.6f}".format( float(conf['Catalyst']['areaactivesite']) ) # Site area in Ų # Direct semireaction: if howmanygasd==0 : # If semireaction on surface: use Arrhenius kb*T/h*exp(-Ga/kB*T) rxn[item]['kd']="k"+item+"d:=evalf("+kbh+"*T*exp(-max(0.0,"+\ "{:.6f}".format( rxn[item]['aGd'])+","+\ "{:.6f}".format( rxn[item]['dGd'])+\ ")/("+kbev+"*T)) ) : " elif howmanygasd==1 : mw="{:.6f}".format(mw_gas(itm,rxn,item,'is1')+mw_gas(itm,rxn,item,'is2')) # (atm=>Pa)*Area*(Ų=>m²) rxn[item]['kd']="k"+item+"d:=evalf((101325*"+area+"*1E-20"+\ "*exp(-max(0.0,"+\ "{:.6f}".format( rxn[item]['aGd'])+","+\ "{:.6f}".format( rxn[item]['dGd'])+\ ")/("+kbev+"*T)))"+\ "/sqrt(2*Pi*1.6605390400E-27*"+mw+"*1.3806485200E-23*T )) : " # Denominator: sqrt(2Pi(elemmass@kg)*massweight*kB(SI)*T else : print("WARNING! direct reaction #",item,"has",howmanygasd,"gas/aq reactants.") print("Abnormal termination") exit() # Reverse (i) semireaction: if howmanygasi==0 : rxn[item]['ki']="k"+item+"i:=evalf("+kbh+"*T*exp(-max(0.0,"+\ "{:.6f}".format( rxn[item]['aGi'])+","+\ "{:.6f}".format(-rxn[item]['dGd'])+\ ")/("+kbev+"*T)) ) : " elif howmanygasi==1 : mw="{:.6f}".format(mw_gas(itm,rxn,item,'fs1')+mw_gas(itm,rxn,item,'fs2')) rxn[item]['ki']="k"+item+"i:=evalf((101325*"+area+"*1E-20"+\ "*exp(-max(0.0,"+\ "{:.6f}".format( rxn[item]['aGi'])+","+\ "{:.6f}".format(-rxn[item]['dGd'])+\ ")/("+kbev+"*T)))"+\ "/sqrt(2*Pi*1.6605390400E-27*"+mw+"*1.3806485200E-23*T )) : " else : print("WARNING! 
reverse reaction #",item,"has",howmanygasd,"gas/aq reactants.") print("Abnormal termination") exit() def process_itm_on_rxn(conf,itm,rxn,item,state,dampt1,dampt2) : """ Use the is/fs states for each reaction to get their activation energies. Then write the formula for reaction rate according to their intermediates. This formula is split between rtd (direct part) and rti (inverse part). Then update the differential equations in which each adsorbed species participates. If a rectant is in gas phase, include a Hertz-Knudsen term in the constant and its pressure as variable in the reaction rate. """ if state=='is1' or state=='is2' : semirxn='d' sign='-' # Consume reactants elif state=='fs1' or state=='fs2' : semirxn='i' sign='+' # Increase products else : print("Wrong state for reaction", item, "\nOnly 'is1', 'is2', 'fs1', and 'fs2' supported") exit() # Get energy of the (initial/final) state "i"
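kinetic_constants() emits the rate constants as Maple strings; numerically they reduce to an Eyring expression for surface steps and a Hertz-Knudsen-style collision rate when one reactant is in the gas or aqueous phase. The following is an illustrative Python equivalent of those two formulas, not part of the script itself.

# Numerical sketch of the two rate-constant regimes encoded as Maple
# strings in kinetic_constants() above (illustrative helper, not part of
# the script): Eyring for surface steps, Hertz-Knudsen-like for steps
# with one gas/aqueous reactant.
import math

KB_H = 20836612225.1252      # kB/h, s^-1
KB_EV = 8.617333262145e-5    # kB, eV K^-1
KB_SI = 1.38064852e-23       # kB, J K^-1
AMU = 1.66053904e-27         # atomic mass unit, kg


def k_surface(Ga, dG, T):
    """Eyring rate: (kB*T/h) * exp(-max(0, Ga, dG) / (kB*T))."""
    return KB_H * T * math.exp(-max(0.0, Ga, dG) / (KB_EV * T))


def k_adsorption(Ga, dG, T, area_A2, mw):
    """Collision-limited rate per site: P0*A*exp(-barrier/kBT) / sqrt(2*pi*m*kB*T)."""
    barrier = max(0.0, Ga, dG)
    flux = 101325.0 * area_A2 * 1e-20 * math.exp(-barrier / (KB_EV * T))
    return flux / math.sqrt(2.0 * math.pi * AMU * mw * KB_SI * T)


print(k_surface(0.75, 0.10, 300.0))               # 0.75 eV barrier at 300 K
print(k_adsorption(0.0, -0.5, 300.0, 9.0, 28.0))  # barrierless adsorption, mw 28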
'Unknown': 1, 'Fifo': 2, 'DevChar': 3, 'Directory': 4, 'DevBlock': 5, 'File': 6, 'Symlink': 7, 'Socket': 8, 'WhiteOut': 9, }, 'DnDAction': { 'Ignore': 0, 'Copy': 1, 'Move': 2, 'Link': 3, }, 'DirectoryOpenFlag': { 'None': 0, 'NoSymlinks': 1, }, 'MediumState': { 'NotCreated': 0, 'Created': 1, 'LockedRead': 2, 'LockedWrite': 3, 'Inaccessible': 4, 'Creating': 5, 'Deleting': 6, }, 'MediumType': { 'Normal': 0, 'Immutable': 1, 'Writethrough': 2, 'Shareable': 3, 'Readonly': 4, 'MultiAttach': 5, }, 'MediumVariant': { 'Standard': 0, 'VmdkSplit2G': 0x01, 'VmdkRawDisk': 0x02, 'VmdkStreamOptimized': 0x04, 'VmdkESX': 0x08, 'VdiZeroExpand': 0x100, 'Fixed': 0x10000, 'Diff': 0x20000, 'NoCreateDir': 0x40000000, }, 'DataType': { 'Int32': 0, 'Int8': 1, 'String': 2, }, 'DataFlags': { 'None': 0x00, 'Mandatory': 0x01, 'Expert': 0x02, 'Array': 0x04, 'FlagMask': 0x07, }, 'MediumFormatCapabilities': { 'Uuid': 0x01, 'CreateFixed': 0x02, 'CreateDynamic': 0x04, 'CreateSplit2G': 0x08, 'Differencing': 0x10, 'Asynchronous': 0x20, 'File': 0x40, 'Properties': 0x80, 'TcpNetworking': 0x100, 'VFS': 0x200, 'Discard': 0x400, 'Preferred': 0x800, 'CapabilityMask': 0xFFF, }, 'KeyboardLED': { 'NumLock': 0x01, 'CapsLock': 0x02, 'ScrollLock': 0x04, }, 'MouseButtonState': { 'LeftButton': 0x01, 'RightButton': 0x02, 'MiddleButton': 0x04, 'WheelUp': 0x08, 'WheelDown': 0x10, 'XButton1': 0x20, 'XButton2': 0x40, 'MouseStateMask': 0x7F, }, 'TouchContactState': { 'None': 0x00, 'InContact': 0x01, 'InRange': 0x02, 'ContactStateMask': 0x03, }, 'FramebufferCapabilities': { 'UpdateImage': 0x01, 'VHWA': 0x02, 'VisibleRegion': 0x04, }, 'GuestMonitorStatus': { 'Disabled': 0, 'Enabled': 1, 'Blank': 2, }, 'ScreenLayoutMode': { 'Apply': 0, 'Reset': 1, }, 'NetworkAttachmentType': { 'Null': 0, 'NAT': 1, 'Bridged': 2, 'Internal': 3, 'HostOnly': 4, 'Generic': 5, 'NATNetwork': 6, }, 'NetworkAdapterType': { 'Null': 0, 'Am79C970A': 1, 'Am79C973': 2, 'I82540EM': 3, 'I82543GC': 4, 'I82545EM': 5, 'Virtio': 6, }, 'NetworkAdapterPromiscModePolicy': { 'Deny': 1, 'AllowNetwork': 2, 'AllowAll': 3, }, 'PortMode': { 'Disconnected': 0, 'HostPipe': 1, 'HostDevice': 2, 'RawFile': 3, 'TCP': 4, }, 'USBControllerType': { 'Null': 0, 'OHCI': 1, 'EHCI': 2, 'XHCI': 3, 'Last': 4, }, 'USBConnectionSpeed': { 'Null': 0, 'Low': 1, 'Full': 2, 'High': 3, 'Super': 4, 'SuperPlus': 5, }, 'USBDeviceState': { 'NotSupported': 0, 'Unavailable': 1, 'Busy': 2, 'Available': 3, 'Held': 4, 'Captured': 5, }, 'USBDeviceFilterAction': { 'Null': 0, 'Ignore': 1, 'Hold': 2, }, 'AudioDriverType': { 'Null': 0, 'WinMM': 1, 'OSS': 2, 'ALSA': 3, 'DirectSound': 4, 'CoreAudio': 5, 'MMPM': 6, 'Pulse': 7, 'SolAudio': 8, }, 'AudioControllerType': { 'AC97': 0, 'SB16': 1, 'HDA': 2, }, 'AudioCodecType': { 'Null': 0, 'SB16': 1, 'STAC9700': 2, 'AD1980': 3, 'STAC9221': 4, }, 'AuthType': { 'Null': 0, 'External': 1, 'Guest': 2, }, 'Reason': { 'Unspecified': 0, 'HostSuspend': 1, 'HostResume': 2, 'HostBatteryLow': 3, 'Snapshot': 4, }, 'StorageBus': { 'Null': 0, 'IDE': 1, 'SATA': 2, 'SCSI': 3, 'Floppy': 4, 'SAS': 5, 'USB': 6, 'PCIe': 7, }, 'StorageControllerType': { 'Null': 0, 'LsiLogic': 1, 'BusLogic': 2, 'IntelAhci': 3, 'PIIX3': 4, 'PIIX4': 5, 'ICH6': 6, 'I82078': 7, 'LsiLogicSas': 8, 'USB': 9, 'NVMe': 10, }, 'ChipsetType': { 'Null': 0, 'PIIX3': 1, 'ICH9': 2, }, 'NATAliasMode': { 'AliasLog': 0x1, 'AliasProxyOnly': 0x02, 'AliasUseSamePorts': 0x04, }, 'NATProtocol': { 'UDP': 0, 'TCP': 1, }, 'BandwidthGroupType': { 'Null': 0, 'Disk': 1, 'Network': 2, }, 'VBoxEventType': { 'Invalid': 0, 'Any': 1, 'Vetoable': 2, 'MachineEvent': 
3, 'SnapshotEvent': 4, 'InputEvent': 5, 'LastWildcard': 31, 'OnMachineStateChanged': 32, 'OnMachineDataChanged': 33, 'OnExtraDataChanged': 34, 'OnExtraDataCanChange': 35, 'OnMediumRegistered': 36, 'OnMachineRegistered': 37, 'OnSessionStateChanged': 38, 'OnSnapshotTaken': 39, 'OnSnapshotDeleted': 40, 'OnSnapshotChanged': 41, 'OnGuestPropertyChanged': 42, 'OnMousePointerShapeChanged': 43, 'OnMouseCapabilityChanged': 44, 'OnKeyboardLedsChanged': 45, 'OnStateChanged': 46, 'OnAdditionsStateChanged': 47, 'OnNetworkAdapterChanged': 48, 'OnSerialPortChanged': 49, 'OnParallelPortChanged': 50, 'OnStorageControllerChanged': 51, 'OnMediumChanged': 52, 'OnVRDEServerChanged': 53, 'OnUSBControllerChanged': 54, 'OnUSBDeviceStateChanged': 55, 'OnSharedFolderChanged': 56, 'OnRuntimeError': 57, 'OnCanShowWindow': 58, 'OnShowWindow': 59, 'OnCPUChanged': 60, 'OnVRDEServerInfoChanged': 61, 'OnEventSourceChanged': 62, 'OnCPUExecutionCapChanged': 63, 'OnGuestKeyboard': 64, 'OnGuestMouse': 65, 'OnNATRedirect': 66, 'OnHostPCIDevicePlug': 67, 'OnVBoxSVCAvailabilityChanged': 68, 'OnBandwidthGroupChanged': 69, 'OnGuestMonitorChanged': 70, 'OnStorageDeviceChanged': 71, 'OnClipboardModeChanged': 72, 'OnDnDModeChanged': 73, 'OnNATNetworkChanged': 74, 'OnNATNetworkStartStop': 75, 'OnNATNetworkAlter': 76, 'OnNATNetworkCreationDeletion': 77, 'OnNATNetworkSetting': 78, 'OnNATNetworkPortForward': 79, 'OnGuestSessionStateChanged': 80, 'OnGuestSessionRegistered': 81, 'OnGuestProcessRegistered': 82, 'OnGuestProcessStateChanged': 83, 'OnGuestProcessInputNotify': 84, 'OnGuestProcessOutput': 85, 'OnGuestFileRegistered': 86, 'OnGuestFileStateChanged': 87, 'OnGuestFileOffsetChanged': 88, 'OnGuestFileRead': 89, 'OnGuestFileWrite': 90, 'OnVideoCaptureChanged': 91, 'OnGuestUserStateChanged': 92, 'OnGuestMultiTouch': 93, 'OnHostNameResolutionConfigurationChange': 94, 'OnSnapshotRestored': 95, 'OnMediumConfigChanged': 96, 'Last': 97, }, 'GuestMouseEventMode': { 'Relative': 0, 'Absolute': 1, }, 'GuestMonitorChangedEventType': { 'Enabled': 0, 'Disabled': 1, 'NewOrigin': 2, }, } __dValuesSym = { 'SettingsVersion': { 'Null': 'Null', 'v1_0': 'v1_0', 'v1_1': 'v1_1', 'v1_2': 'v1_2', 'v1_3pre': 'v1_3pre', 'v1_3': 'v1_3', 'v1_4': 'v1_4', 'v1_5': 'v1_5', 'v1_6': 'v1_6', 'v1_7': 'v1_7', 'v1_8': 'v1_8', 'v1_9': 'v1_9', 'v1_10': 'v1_10', 'v1_11': 'v1_11', 'v1_12': 'v1_12', 'v1_13': 'v1_13', 'v1_14': 'v1_14', 'v1_15': 'v1_15', 'v1_16': 'v1_16', 'Future': 'Future', }, 'AccessMode': { 'ReadOnly': 'ReadOnly', 'ReadWrite': 'ReadWrite', }, 'MachineState': { 'Null': 'Null', 'PoweredOff': 'PoweredOff', 'Saved': 'Saved', 'Teleported': 'Teleported', 'Aborted': 'Aborted', 'Running': 'Running', 'Paused': 'Paused', 'Stuck': 'Stuck', 'Teleporting': 'Teleporting', 'LiveSnapshotting': 'LiveSnapshotting', 'Starting': 'Starting', 'Stopping': 'Stopping', 'Saving': 'Saving', 'Restoring': 'Restoring', 'TeleportingPausedVM': 'TeleportingPausedVM', 'TeleportingIn': 'TeleportingIn', 'FaultTolerantSyncing': 'FaultTolerantSyncing', 'DeletingSnapshotOnline': 'DeletingSnapshotOnline', 'DeletingSnapshotPaused': 'DeletingSnapshotPaused', 'OnlineSnapshotting': 'OnlineSnapshotting', 'RestoringSnapshot': 'RestoringSnapshot', 'DeletingSnapshot': 'DeletingSnapshot', 'SettingUp': 'SettingUp', 'Snapshotting': 'Snapshotting', 'FirstOnline': 'FirstOnline', 'LastOnline': 'LastOnline', 'FirstTransient': 'FirstTransient', 'LastTransient': 'LastTransient', }, 'SessionState': { 'Null': 'Null', 'Unlocked': 'Unlocked', 'Locked': 'Locked', 'Spawning': 'Spawning', 'Unlocking': 'Unlocking', }, 
'CPUPropertyType': { 'Null': 'Null', 'PAE': 'PAE', 'LongMode': 'LongMode', 'TripleFaultReset': 'TripleFaultReset', 'APIC': 'APIC', 'X2APIC': 'X2APIC', }, 'HWVirtExPropertyType': { 'Null': 'Null', 'Enabled': 'Enabled', 'VPID': 'VPID', 'NestedPaging': 'NestedPaging', 'UnrestrictedExecution': 'UnrestrictedExecution', 'LargePages': 'LargePages', 'Force': 'Force', }, 'ParavirtProvider': { 'None': 'None', 'Default': 'Default', 'Legacy': 'Legacy', 'Minimal': 'Minimal', 'HyperV': 'HyperV', 'KVM': 'KVM', }, 'FaultToleranceState': { 'Inactive': 'Inactive', 'Master': 'Master', 'Standby': 'Standby', }, 'LockType': { 'Null': 'Null', 'Shared': 'Shared', 'Write': 'Write', 'VM': 'VM', }, 'SessionType': { 'Null': 'Null', 'WriteLock': 'WriteLock', 'Remote': 'Remote', 'Shared': 'Shared', }, 'DeviceType': { 'Null': 'Null', 'Floppy': 'Floppy', 'DVD': 'DVD', 'HardDisk': 'HardDisk', 'Network': 'Network', 'USB': 'USB', 'SharedFolder': 'SharedFolder', 'Graphics3D': 'Graphics3D', }, 'DeviceActivity': { 'Null': 'Null', 'Idle': 'Idle', 'Reading': 'Reading', 'Writing': 'Writing', }, 'ClipboardMode': { 'Disabled': 'Disabled', 'HostToGuest': 'HostToGuest', 'GuestToHost': 'GuestToHost', 'Bidirectional': 'Bidirectional', }, 'DnDMode': { 'Disabled': 'Disabled', 'HostToGuest': 'HostToGuest', 'GuestToHost': 'GuestToHost', 'Bidirectional': 'Bidirectional', }, 'Scope': { 'Global': 'Global', 'Machine': 'Machine', 'Session': 'Session', }, 'BIOSBootMenuMode': { 'Disabled': 'Disabled', 'MenuOnly': 'MenuOnly', 'MessageAndMenu': 'MessageAndMenu', }, 'APICMode': { 'Disabled': 'Disabled', 'APIC': 'APIC', 'X2APIC': 'X2APIC', }, 'ProcessorFeature': { 'HWVirtEx': 'HWVirtEx', 'PAE': 'PAE', 'LongMode': 'LongMode', 'NestedPaging': 'NestedPaging', }, 'FirmwareType': { 'BIOS': 'BIOS', 'EFI': 'EFI', 'EFI32': 'EFI32', 'EFI64': 'EFI64', 'EFIDUAL': 'EFIDUAL', }, 'PointingHIDType': { 'None': 'None', 'PS2Mouse': 'PS2Mouse', 'USBMouse': 'USBMouse', 'USBTablet': 'USBTablet', 'ComboMouse': 'ComboMouse', 'USBMultiTouch': 'USBMultiTouch', }, 'KeyboardHIDType': { 'None': 'None', 'PS2Keyboard': 'PS2Keyboard', 'USBKeyboard': 'USBKeyboard', 'ComboKeyboard': 'ComboKeyboard', }, 'BitmapFormat': { 'Opaque': 'Opaque', 'BGR': 'BGR', 'BGR0': 'BGR0', 'BGRA': 'BGRA', 'RGBA': 'RGBA', 'PNG': 'PNG', 'JPEG': 'JPEG', }, 'DhcpOpt': { 'SubnetMask': 'SubnetMask', 'TimeOffset': 'TimeOffset', 'Router': 'Router', 'TimeServer': 'TimeServer', 'NameServer': 'NameServer', 'DomainNameServer': 'DomainNameServer', 'LogServer': 'LogServer', 'Cookie': 'Cookie', 'LPRServer': 'LPRServer', 'ImpressServer': 'ImpressServer', 'ResourseLocationServer': 'ResourseLocationServer', 'HostName': 'HostName', 'BootFileSize': 'BootFileSize', 'MeritDumpFile': 'MeritDumpFile', 'DomainName': 'DomainName', 'SwapServer': 'SwapServer', 'RootPath': 'RootPath', 'ExtensionPath': 'ExtensionPath', 'IPForwardingEnableDisable': 'IPForwardingEnableDisable', 'NonLocalSourceRoutingEnableDisable': 'NonLocalSourceRoutingEnableDisable', 'PolicyFilter': 'PolicyFilter', 'MaximumDatagramReassemblySize': 'MaximumDatagramReassemblySize', 'DefaultIPTime2Live': 'DefaultIPTime2Live', 'PathMTUAgingTimeout': 'PathMTUAgingTimeout', 'IPLayerParametersPerInterface': 'IPLayerParametersPerInterface', 'InterfaceMTU': 'InterfaceMTU', 'AllSubnetsAreLocal': 'AllSubnetsAreLocal', 'BroadcastAddress': 'BroadcastAddress', 'PerformMaskDiscovery': 'PerformMaskDiscovery', 'MaskSupplier': 'MaskSupplier', 'PerformRouteDiscovery': 'PerformRouteDiscovery', 'RouterSolicitationAddress': 'RouterSolicitationAddress', 'StaticRoute': 'StaticRoute', 
'TrailerEncapsulation': 'TrailerEncapsulation', 'ARPCacheTimeout': 'ARPCacheTimeout', 'EthernetEncapsulation': 'EthernetEncapsulation', 'TCPDefaultTTL': 'TCPDefaultTTL', 'TCPKeepAliveInterval': 'TCPKeepAliveInterval', 'TCPKeepAliveGarbage': 'TCPKeepAliveGarbage', 'NetworkInformationServiceDomain': 'NetworkInformationServiceDomain', 'NetworkInformationServiceServers': 'NetworkInformationServiceServers', 'NetworkTimeProtocolServers': 'NetworkTimeProtocolServers', 'VendorSpecificInformation': 'VendorSpecificInformation', 'Option_44': 'Option_44', 'Option_45': 'Option_45', 'Option_46': 'Option_46', 'Option_47': 'Option_47', 'Option_48': 'Option_48', 'Option_49': 'Option_49', 'IPAddressLeaseTime': 'IPAddressLeaseTime', 'Option_64': 'Option_64', 'Option_65': 'Option_65', 'TFTPServerName': 'TFTPServerName', 'BootfileName': 'BootfileName', 'Option_68': 'Option_68', 'Option_69': 'Option_69', 'Option_70': 'Option_70', 'Option_71': 'Option_71', 'Option_72': 'Option_72', 'Option_73': 'Option_73', 'Option_74': 'Option_74', 'Option_75': 'Option_75', 'Option_119': 'Option_119', }, 'DhcpOptEncoding': { 'Legacy': 'Legacy', 'Hex': 'Hex', }, 'VFSType': { 'File': 'File', 'Cloud': 'Cloud', 'S3': 'S3', 'WebDav': 'WebDav', }, 'ImportOptions': { 'KeepAllMACs': 'KeepAllMACs', 'KeepNATMACs': 'KeepNATMACs', 'ImportToVDI': 'ImportToVDI', }, 'ExportOptions': { 'CreateManifest': 'CreateManifest', 'ExportDVDImages': 'ExportDVDImages', 'StripAllMACs': 'StripAllMACs', 'StripAllNonNATMACs': 'StripAllNonNATMACs', }, 'CertificateVersion': { 'V1': 'V1', 'V2': 'V2', 'V3': 'V3', 'Unknown': 'Unknown', }, 'VirtualSystemDescriptionType': { 'Ignore': 'Ignore', 'OS': 'OS', 'Name': 'Name', 'Product': 'Product', 'Vendor': 'Vendor', 'Version': 'Version', 'ProductUrl': 'ProductUrl', 'VendorUrl': 'VendorUrl', 'Description': 'Description', 'License': 'License', 'Miscellaneous': 'Miscellaneous', 'CPU': 'CPU', 'Memory': 'Memory', 'HardDiskControllerIDE': 'HardDiskControllerIDE', 'HardDiskControllerSATA': 'HardDiskControllerSATA', 'HardDiskControllerSCSI': 'HardDiskControllerSCSI', 'HardDiskControllerSAS': 'HardDiskControllerSAS', 'HardDiskImage': 'HardDiskImage', 'Floppy': 'Floppy', 'CDROM': 'CDROM', 'NetworkAdapter': 'NetworkAdapter', 'USBController': 'USBController', 'SoundCard': 'SoundCard', 'SettingsFile': 'SettingsFile', }, 'VirtualSystemDescriptionValueType': { 'Reference': 'Reference', 'Original': 'Original', 'Auto': 'Auto', 'ExtraConfig': 'ExtraConfig', }, 'GraphicsControllerType': { 'Null': 'Null', 'VBoxVGA': 'VBoxVGA', 'VMSVGA': 'VMSVGA', }, 'CleanupMode': { 'UnregisterOnly': 'UnregisterOnly', 'DetachAllReturnNone': 'DetachAllReturnNone', 'DetachAllReturnHardDisksOnly': 'DetachAllReturnHardDisksOnly', 'Full': 'Full', }, 'CloneMode': { 'MachineState': 'MachineState', 'MachineAndChildStates': 'MachineAndChildStates', 'AllStates': 'AllStates', }, 'CloneOptions': { 'Link': 'Link', 'KeepAllMACs': 'KeepAllMACs', 'KeepNATMACs': 'KeepNATMACs', 'KeepDiskNames': 'KeepDiskNames', }, 'AutostopType': { 'Disabled': 'Disabled', 'SaveState': 'SaveState', 'PowerOff': 'PowerOff', 'AcpiShutdown': 'AcpiShutdown', }, 'HostNetworkInterfaceMediumType': { 'Unknown': 'Unknown', 'Ethernet': 'Ethernet', 'PPP': 'PPP', 'SLIP': 'SLIP', }, 'HostNetworkInterfaceStatus': { 'Unknown': 'Unknown', 'Up': 'Up', 'Down':
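The tables above map symbolic enum names to their numeric values (and, in the second table, to their own names). Below is an illustrative reverse-lookup helper, using a small excerpt of the numeric table as sample data; it is not part of the generated bindings.

# Illustrative helper (not part of the generated bindings): reverse-map a
# numeric value from one of the enum tables above back to its symbolic name.
from typing import Dict, Optional

# Tiny excerpt of the numeric table above, used here only as sample data.
D_VALUES = {
    'NetworkAttachmentType': {
        'Null': 0, 'NAT': 1, 'Bridged': 2, 'Internal': 3,
        'HostOnly': 4, 'Generic': 5, 'NATNetwork': 6,
    },
}


def enum_name(d_values: Dict[str, Dict[str, int]],
              enum: str, value: int) -> Optional[str]:
    """Return the first symbolic name whose value matches, or None."""
    for name, val in d_values.get(enum, {}).items():
        if val == value:
            return name
    return None


print(enum_name(D_VALUES, 'NetworkAttachmentType', 2))   # 'Bridged'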
**kwargs): pass def MFnMesh_getFaceVertexColorIndex(*args, **kwargs): pass def delete_MFnData(*args, **kwargs): pass def MDGMessage_addNodeChangeUuidCheckCallback(*args, **kwargs): pass def MEdit_setFailed(*args, **kwargs): pass def array2dFloat_swigregister(*args, **kwargs): pass def array3dFloat_get(*args, **kwargs): pass def MFnSingleIndexedComponent_getCompleteData(*args, **kwargs): pass def MFnCameraSet_appendLayer(*args, **kwargs): pass def MFloatVectorArray_append(*args, **kwargs): pass def MUintArray___add__(*args, **kwargs): pass def MPlug_connectionByPhysicalIndex(*args, **kwargs): pass def new_MIffFile(*args, **kwargs): pass def MTime___ge__(*args, **kwargs): pass def MFnMesh_getColorSetNames(*args, **kwargs): pass def MPlug_node(*args, **kwargs): pass def MFnNonExtendedLight_setDepthMapResolution(*args, **kwargs): pass def MDGContext_assign(*args, **kwargs): pass def MRenderPassDef_getAttributeType(*args, **kwargs): pass def array3dInt_set(*args, **kwargs): pass def MItMeshPolygon_setUV(*args, **kwargs): pass def MFnSet_intersectsWith(*args, **kwargs): pass def MFnCompoundAttribute_getAddAttrCmds(*args, **kwargs): pass def MFnMesh_deleteUVSet(*args, **kwargs): pass def MFloatPoint_y_set(*args, **kwargs): pass def MUintArray_setLength(*args, **kwargs): pass def MPlug_isElement(*args, **kwargs): pass def MGlobal_setOptionVarValue(*args, **kwargs): pass def MFnMesh_getPointsAtUV(*args, **kwargs): pass def MDataBlock_inputValue(*args, **kwargs): pass def MRampAttribute_setRamp(*args, **kwargs): pass def MFnMesh_setCurrentUVSetName(*args, **kwargs): pass def MItMeshPolygon_center(*args, **kwargs): pass def delete_MFnGeometryData(*args, **kwargs): pass def new_MFnPointLight(*args, **kwargs): pass def MFnComponentListData_length(*args, **kwargs): pass def MFloatPoint___getitem__(*args, **kwargs): pass def MUint64Array_sizeIncrement(*args, **kwargs): pass def MPlugArray_append(*args, **kwargs): pass def MGlobal_addToModel(*args, **kwargs): pass def array3dFloat_set(*args, **kwargs): pass def MFnMesh_renameUVSet(*args, **kwargs): pass def MFnAnisotropyShader_correlationX(*args, **kwargs): pass def MDagPath_assign(*args, **kwargs): pass def MQuaternion_swigregister(*args, **kwargs): pass def MItMeshFaceVertex_getUV(*args, **kwargs): pass def MFnPluginData_constData(*args, **kwargs): pass def new_MFnComponent(*args, **kwargs): pass def MItMeshFaceVertex_vertId(*args, **kwargs): pass def MFloatPointArray_remove(*args, **kwargs): pass def MTypeId_id(*args, **kwargs): pass def MFnCameraSet_isLayerActive(*args, **kwargs): pass def MObjectHandle_assign(*args, **kwargs): pass def MGlobal_setComponentSelectionMask(*args, **kwargs): pass def MFnMesh_isRightHandedTangent(*args, **kwargs): pass def MFnAmbientLight_create(*args, **kwargs): pass def MDagPath_hasFn(*args, **kwargs): pass def MQuaternion_scaleIt(*args, **kwargs): pass def MFnSubd_polygonHasVertexUVs(*args, **kwargs): pass def new_MItMeshFaceVertex(*args, **kwargs): pass def MFnPhongEShader_whiteness(*args, **kwargs): pass def MFnCamera_isOrtho(*args, **kwargs): pass def MFloatMatrix_inverse(*args, **kwargs): pass def MTrimBoundaryArray_length(*args, **kwargs): pass def MCallbackIdArray_length(*args, **kwargs): pass def MGlobal_setHiliteList(*args, **kwargs): pass def MFnMesh_setVertexNormals(*args, **kwargs): pass def MFnReference_parentReference(*args, **kwargs): pass def intPtr_swigregister(*args, **kwargs): pass def MDAGDrawOverrideInfo_fDisplayType_get(*args, **kwargs): pass def MQuaternion_asEulerRotation(*args, **kwargs): pass 
def MItMeshEdge_cleanupSmoothing(*args, **kwargs): pass def delete_MFnPartition(*args, **kwargs): pass def MFnLight_lightAmbient(*args, **kwargs): pass def MFnCamera_isDepthOfField(*args, **kwargs): pass def new_MFloatMatrix(*args, **kwargs): pass def MPointOnMesh_faceIndex(*args, **kwargs): pass def MTransformationMatrix_rotatePivot(*args, **kwargs): pass def MCacheFormatDescription_getDescriptionInfo(*args, **kwargs): pass def MNurbsIntersector_className(*args, **kwargs): pass def MFnVolumeLight_setEmitAmbient(*args, **kwargs): pass def MFnMesh_getRawNormals(*args, **kwargs): pass def MFnContainerNode_isCurrent(*args, **kwargs): pass def MDagPathArray_set(*args, **kwargs): pass def MScriptUtil_setDouble2ArrayItem(*args, **kwargs): pass def boolPtr_cast(*args, **kwargs): pass def MItInstancer_instancerId(*args, **kwargs): pass def MPoint_z_get(*args, **kwargs): pass def MFnNurbsSurface_setUVs(*args, **kwargs): pass def MFnLight_lightDiffuse(*args, **kwargs): pass def MFnCamera_setHorizontalFieldOfView(*args, **kwargs): pass def MFloatArray_copy(*args, **kwargs): pass def MBoundingBox_contains(*args, **kwargs): pass def MNodeMessage_addNameChangedCallback(*args, **kwargs): pass def MFnVectorArrayData_array(*args, **kwargs): pass def MFnMesh_sortIntersectionFaceTriIds(*args, **kwargs): pass def MFnSubd_evaluatePosition(*args, **kwargs): pass def MFnTransform_getRotation(*args, **kwargs): pass def MDGModifier_doIt(*args, **kwargs): pass def MScriptUtil_getInt3ArrayItem(*args, **kwargs): pass def delete_floatPtr(*args, **kwargs): pass def MItGeometry_exactCount(*args, **kwargs): pass def MFnNurbsSurface_updateSurface(*args, **kwargs): pass def MFnCamera_setFilmRollValue(*args, **kwargs): pass def MFileObject_setFullName(*args, **kwargs): pass def delete_MTimerMessage(*args, **kwargs): pass def MFnReference_nodes(*args, **kwargs): pass def MAttributePatternArray_setLength(*args, **kwargs): pass def MNodeClass_removeFromClassification(*args, **kwargs): pass def MDistance_setUIUnit(*args, **kwargs): pass def MFnUnitAttribute_getMax(*args, **kwargs): pass def MFnMesh_numUVs(*args, **kwargs): pass def MDataHandle_setGenericChar(*args, **kwargs): pass def MDGModifier_unlinkExtensionAttributeFromPlugin(*args, **kwargs): pass def MScriptUtil_setCharArray(*args, **kwargs): pass def charPtr_frompointer(*args, **kwargs): pass def MIteratorType_getFilterType(*args, **kwargs): pass def MFloatVectorArray___getitem__(*args, **kwargs): pass def MFnNurbsSurface_setKnotInU(*args, **kwargs): pass def MFnDagNode_isInstancedAttribute(*args, **kwargs): pass def MFnCamera_setVerticalPan(*args, **kwargs): pass def MFileObject_rawName(*args, **kwargs): pass def MTime_ticksPerSecond(*args, **kwargs): pass def MAttributePattern___eq__(*args, **kwargs): pass def MNamespace_getNamespaceObjects(*args, **kwargs): pass def MFnUint64SingleIndexedComponent_element(*args, **kwargs): pass def MFnMesh_deleteEdge(*args, **kwargs): pass def MPoint_swigregister(*args, **kwargs): pass def MFnTransform_getTranslation(*args, **kwargs): pass def MDataHandle_set2Double(*args, **kwargs): pass def MMeshSmoothOptions_setSubdivisionType(*args, **kwargs): pass def MDagPath_pop(*args, **kwargs): pass def MScriptUtil_setChar(*args, **kwargs): pass def MItEdits_isReverse(*args, **kwargs): pass def MFnNurbsSurface_getKnotDomain(*args, **kwargs): pass def MFnCamera_shakeEnabled(*args, **kwargs): pass def MFileIO_cleanReference(*args, **kwargs): pass def MAttributeSpecArray_setSizeIncrement(*args, **kwargs): pass def MTime_swigregister(*args, 
**kwargs): pass def MModelMessage_addBeforeDuplicateCallback(*args, **kwargs): pass def MFnUInt64ArrayData_type(*args, **kwargs): pass def MFnMatrixData_swigregister(*args, **kwargs): pass def MFnAssembly_importFile(*args, **kwargs): pass def MDataHandle_setBool(*args, **kwargs): pass def MMeshSmoothOptions_setPropEdgeHardness(*args, **kwargs): pass def MScriptUtil_asShort4Ptr(*args, **kwargs): pass def MItDependencyGraph_className(*args, **kwargs): pass def MFnNurbsSurface_copy(*args, **kwargs): pass def MFnMesh_setUV(*args, **kwargs): pass def MTransformationMatrix_addRotationQuaternion(*args, **kwargs): pass def MFileIO_beforeExportFilename(*args, **kwargs): pass def MTimeArray_insert(*args, **kwargs): pass def MAttributeIndex_assign(*args, **kwargs): pass def new_MMatrixArray(*args, **kwargs): pass def MFnTripleIndexedComponent_create(*args, **kwargs): pass def MFnMatrixAttribute_type(*args, **kwargs): pass def MFloatVectorArray_length(*args, **kwargs): pass def MDataHandle_asFloat3(*args, **kwargs): pass def MMeshIntersector_swigregister(*args, **kwargs): pass def MScriptUtil_asDouble(*args, **kwargs): pass def MItDependencyGraph_atNodeLevel(*args, **kwargs): pass def MFnNurbsCurve_findParamFromLength(*args, **kwargs): pass def delete_MFnCamera(*args, **kwargs): pass def MFileIO_getReferenceConnectionsMade(*args, **kwargs): pass def MTesselationParams_setUNumber(*args, **kwargs): pass def MArrayDataHandle_swigregister(*args, **kwargs): pass def MMatrix___sub__(*args, **kwargs): pass def array2dDouble_getptr(*args, **kwargs): pass def MFnSubdNames_levelOneFaceIndexFromId(*args, **kwargs): pass def MRampAttribute_hasIndex(*args, **kwargs): pass def MFnLightDataAttribute_className(*args, **kwargs): pass def MDataHandle_asLong(*args, **kwargs): pass def MDagMessage_addAllDagChangesCallback(*args, **kwargs): pass def MProfilingScope_swigregister(*args, **kwargs): pass def delete_MItDependencyGraph(*args, **kwargs): pass def MFnNurbsCurve_cvs(*args, **kwargs): pass def MItMeshFaceVertex_getUVIndex(*args, **kwargs): pass def MFnReference_className(*args, **kwargs): pass def MFileIO_fileType(*args, **kwargs): pass def MTesselationParams_setStdFractionalTolerance(*args, **kwargs): pass def MArrayDataHandle_inputValue(*args, **kwargs): pass def MLockMessage_setPlugLockQueryCallback(*args, **kwargs): pass def MFnSubd_updateSubdSurface(*args, **kwargs): pass def MFnCamera_setClippingPlanes(*args, **kwargs): pass def MFnLayeredShader_create(*args, **kwargs): pass def MFnDagNode_fullPathName(*args, **kwargs): pass def MWeight_className(*args, **kwargs): pass def MFnMesh_getUVAtPoint(*args, **kwargs): pass def MCurveAttribute_createCurveAttr(*args, **kwargs): pass def MProfiler_getThreadDuration(*args, **kwargs): pass def delete_shortPtr(*args, **kwargs): pass def MItDag_prune(*args, **kwargs): pass def MFnNurbsCurve_createWithEditPoints(*args, **kwargs): pass def MFnReflectShader_reflectivity(*args, **kwargs): pass def MEvaluationNode_dependencyNode(*args, **kwargs): pass def MSyntax_setObjectType(*args, **kwargs): pass def MArgList_flagIndex(*args, **kwargs): pass def MItSurfaceCV_isRowDone(*args, **kwargs): pass def MFnCamera_className(*args, **kwargs): pass def MFnSubd_polygonIsValid(*args, **kwargs): pass def MFnIntArrayData___getitem__(*args, **kwargs): pass def MFnDagNode_childCount(*args, **kwargs): pass def MVector_isEquivalent(*args, **kwargs): pass def MContainerMessage_swigregister(*args, **kwargs): pass def MProfiler_getCategoryIndex(*args, **kwargs): pass def MItCurveCV_reset(*args, 
**kwargs): pass def MFnNumericData_setData3Short(*args, **kwargs): pass def array3dDouble_get(*args, **kwargs): pass def MFnLambertShader_setIncandescence(*args, **kwargs): pass def MEulerRotation_swigregister(*args, **kwargs): pass def MItMeshPolygon_getNormals(*args, **kwargs): pass def MStreamUtils_writeDouble(*args, **kwargs): pass def new_MArgList(*args, **kwargs): pass def MItSubdFace_index(*args, **kwargs): pass def MFnSubd_updateAllEditsAndCreases(*args, **kwargs): pass def MFnGeometryData_addObjectGroup(*args, **kwargs): pass def MFnDependencyNode_dgTimerReset(*args, **kwargs): pass def MComputation_setProgress(*args, **kwargs): pass def MProfiler_eventBegin(*args, **kwargs): pass def MPoint_rationalize(*args, **kwargs): pass def MInt64Array_sizeIncrement(*args, **kwargs): pass def MFnNumericData_className(*args, **kwargs): pass def new_MFnLambertShader(*args, **kwargs): pass def MEulerRotation_boundIt(*args, **kwargs): pass def MParentingEdit_parentName(*args, **kwargs): pass def MArgParser_commandArgumentInt(*args, **kwargs): pass def MItSubdEdge_level(*args, **kwargs): pass def MFnSubd_vertexBaseMeshGetWithId(*args, **kwargs): pass def MFnSubdNames_base(*args, **kwargs): pass def MGlobal_className(*args, **kwargs): pass def MFnDependencyNode_canBeWritten(*args, **kwargs): pass def MVectorArray_remove(*args, **kwargs): pass def delete_MCommandMessage(*args, **kwargs): pass def MPointArray_swigregister(*args, **kwargs): pass def MIntArray___add__(*args, **kwargs): pass def MFnNumericAttribute_hasMin(*args, **kwargs): pass def MFnAttribute_setUsesArrayDataBuilder(*args, **kwargs): pass def MFnCamera_viewDirection(*args, **kwargs): pass def MEulerRotation___add__(*args, **kwargs): pass def MSelectionList_className(*args, **kwargs): pass def new_MArgParser(*args, **kwargs): pass def MItSelectionList_getDependNode(*args, **kwargs): pass def MFnSubd_editsUpdateAll(*args, **kwargs): pass def MFnExpression_swigregister(*args, **kwargs): pass def MFnDependencyNode_isFromReferencedFile(*args, **kwargs): pass def MMessage_currentCallbackId(*args, **kwargs): pass def new_MPointArray(*args, **kwargs): pass def MMeshSmoothOptions_keepHardEdge(*args, **kwargs): pass def MIntArray_remove(*args, **kwargs): pass def MFnVolumeLight_coneEndRadius(*args, **kwargs): pass def MMeshIsectAccelParams_swigregister(*args, **kwargs): pass def MFnAttribute_setConnectable(*args, **kwargs): pass def MDoubleArray___getitem__(*args, **kwargs): pass def delete_MSelectionList(*args, **kwargs): pass def MAngle_unit(*args, **kwargs): pass def MItMeshVertex_getConnectedEdges(*args, **kwargs): pass def MFnSubdData_type(*args, **kwargs): pass def MTimerMessage_className(*args, **kwargs): pass def MFnEnumAttribute_getDefault(*args, **kwargs): pass def MFnDependencyNode_typeId(*args, **kwargs): pass def MMessageNode_fServerPtr_set(*args, **kwargs): pass def MPlug_asInt(*args, **kwargs): pass def MImage_filter(*args, **kwargs): pass def MFnMesh_setBinaryBlindData(*args, **kwargs): pass def MFnAttribute_isHidden(*args, **kwargs): pass def MFnSubdNames_corner(*args, **kwargs): pass def delete_MDoubleArray(*args, **kwargs): pass def MRichSelection_setSelection(*args, **kwargs): pass def MFnNurbsSurface_tesselate(*args, **kwargs): pass def delete_MSpace(*args, **kwargs): pass def MItMeshVertex_currentItem(*args, **kwargs): pass def MFnStringArrayData_className(*args, **kwargs): pass def MFnDoubleIndexedComponent_addElements(*args, **kwargs): pass def MFloatVector_y_set(*args, **kwargs): pass def MURI_getPassword(*args, 
**kwargs): pass def MColor___neg__(*args, **kwargs): pass def MFnCamera_set(*args, **kwargs): pass def MFnArrayAttrsData_getDoubleData(*args, **kwargs): pass def MDistance_asInches(*args, **kwargs): pass def MConnectDisconnectAttrEdit_isConnection(*args, **kwargs): pass def MMeshSmoothOptions_openSubdivVertexBoundary(*args, **kwargs): pass def MFnMesh_setFaceVertexNormal(*args, **kwargs): pass def MURI_setAuthority(*args, **kwargs): pass def MColorArray_clear(*args, **kwargs): pass def MFnTransform_restPosition(*args, **kwargs): pass def array4dDouble_swigregister(*args, **kwargs): pass def MEdit_hasEditData(*args, **kwargs): pass def MFloatVectorArray_copy(*args, **kwargs): pass def MUintArray___radd__(*args, **kwargs): pass def MItMeshPolygon_reset(*args, **kwargs): pass def MFnCamera_setCenterOfInterest(*args, **kwargs): pass def MDGContext_className(*args, **kwargs): pass def MFnCompoundAttribute_swigregister(*args, **kwargs): pass def MFnMesh_deleteColorSet(*args, **kwargs): pass def MFloatPoint_y_get(*args, **kwargs): pass def MUintArray_length(*args, **kwargs): pass def MPlug_isCompound(*args, **kwargs): pass def MGlobal_removeOptionVar(*args, **kwargs): pass def MFloatVector___itruediv__(*args, **kwargs): pass def MFnCamera_setHorizontalFilmOffset(*args, **kwargs): pass def MUint64Array_className(*args, **kwargs): pass def MPlugArray_copy(*args, **kwargs): pass def delete_MFn(*args, **kwargs): pass def MDagPath___eq__(*args, **kwargs): pass def new_MRampAttribute(*args, **kwargs): pass def MItMeshPolygon_polygon(*args, **kwargs): pass def MTypeId_className(*args, **kwargs): pass def MFnTransform_create(*args, **kwargs): pass def MObjectHandle_objectHashCode(*args, **kwargs): pass def delete_MItMeshFaceVertex(*args, **kwargs): pass def MTrimBoundaryArray_size(*args, **kwargs): pass def MObjectArray_copy(*args, **kwargs): pass def MFloatVector___iadd__(*args, **kwargs): pass def MFileObject_setRawFullName(*args, **kwargs): pass def MFnCamera_setFStop(*args, **kwargs): pass def MMeshSmoothOptions_subdivisionType(*args, **kwargs): pass def MTransformationMatrix_setRotatePivot(*args, **kwargs): pass def MNurbsIntersector_swigregister(*args, **kwargs): pass def boolPtr_frompointer(*args, **kwargs): pass def MProfiler_eventDataAvailable(*args, **kwargs): pass def MDGModifier_removeExtensionAttribute(*args, **kwargs): pass def MFnSubdNames_baseFaceIdFromIndex(*args, **kwargs): pass def floatPtr_assign(*args, **kwargs): pass def MTimerMessage_swigregister(*args, **kwargs): pass def MFnCamera_setVerticalFilmAperture(*args, **kwargs): pass def MAttributePatternArray___getitem__(*args, **kwargs): pass def charPtr_swigregister(*args, **kwargs): pass def delete_MDGModifier(*args, **kwargs): pass def MTransformationMatrix_rotation(*args, **kwargs): pass def MScriptUtil_setInt3ArrayItem(*args, **kwargs): pass def MTime_assign(*args, **kwargs): pass def MFnSubdNames_fromSelectionIndices(*args, **kwargs): pass def MAttributeSpecArray_sizeIncrement(*args, **kwargs): pass def MDataHandle_setChar(*args, **kwargs): pass def MFnCamera_aspectRatio(*args, **kwargs): pass def MFnNurbsSurface_getDataObject(*args, **kwargs): pass def MNodeMessage_addUuidChangedCallback(*args, **kwargs): pass def MTimeArray_append(*args, **kwargs): pass def delete_MMatrixArray(*args, **kwargs): pass def MFnAssembly_postLoad(*args, **kwargs): pass def MFloatVector_length(*args, **kwargs): pass def new_MPointOnMesh(*args, **kwargs): pass def MScriptUtil_asUint(*args, **kwargs): pass def MItDependencyGraph_currentLevel(*args, 
**kwargs): pass def MTesselationParams_setVNumber(*args, **kwargs): pass def delete_MAttributeIndex(*args, **kwargs): pass def MObject___ne__(*args, **kwargs): pass def MMatrix___imul__(*args, **kwargs): pass def MFnDagNode_drawOverrideEnabled(*args, **kwargs): pass def MDataHandle_asInt(*args, **kwargs): pass def MDagMessage_addAllDagChangesDagPathCallback(*args, **kwargs): pass def MPolyMessage_addPolyComponentIdChangedCallback(*args, **kwargs): pass def MItDependencyGraph_reset(*args, **kwargs): pass def MTransformationMatrix_addRotation(*args, **kwargs): pass def new_MFnReference(*args, **kwargs): pass def MTesselationParams_setStdMinEdgeLength(*args, **kwargs): pass def MArrayDataHandle_outputValue(*args, **kwargs): pass def MLockMessage_className(*args, **kwargs): pass def MFnSubd_vertexBaseMeshAddWithIndex(*args, **kwargs): pass def MFnEnumAttribute_fieldName(*args, **kwargs): pass def MFnDagNode_partialPathName(*args, **kwargs): pass def MWeight_swigregister(*args, **kwargs): pass def MFnMesh_removeFaceVertexColors(*args, **kwargs): pass def MProfiler_getBufferSize(*args, **kwargs): pass def MFnNurbsCurve_create(*args, **kwargs): pass def MFnContainerNode_makeCurrent(*args, **kwargs): pass def MArgList_addArg(*args, **kwargs): pass def MItSurfaceCV_next(*args, **kwargs): pass def MFloatVector_isEquivalent(*args, **kwargs): pass def MVector_isParallel(*args, **kwargs): pass def MItCurveCV_position(*args, **kwargs): pass def MFnNumericData_setData3Int(*args, **kwargs): pass def MObject_swigregister(*args, **kwargs): pass def MFnTripleIndexedComponent_addElement(*args, **kwargs): pass def MStreamUtils_readChar(*args, **kwargs): pass def delete_MArgList(*args, **kwargs): pass def MFnGeometryData_removeObjectGroup(*args, **kwargs): pass def MItMeshPolygon_getUV(*args, **kwargs): pass def MPoint_homogenize(*args, **kwargs): pass def MInt64Array_className(*args, **kwargs): pass def MEulerRotation_alternateSolution(*args, **kwargs): pass def MParentingEdit_editType(*args, **kwargs): pass def MItSubdEdge_setLevel(*args, **kwargs): pass def MFnMesh_setColors(*args, **kwargs): pass def MFnSubd_vertexBaseMeshSetWithId(*args, **kwargs): pass def MFnGenericAttribute_addDataAccept(*args, **kwargs): pass def MUuid_generate(*args, **kwargs): pass def MVectorArray_insert(*args, **kwargs): pass def new_MPoint(*args, **kwargs): pass def MIntArray___radd__(*args, **kwargs): pass def MFnNumericAttribute_hasMax(*args, **kwargs): pass def MFnAttribute_setInternal(*args, **kwargs): pass def MFnCamera_setShakeEnabled(*args, **kwargs): pass def MSelectionList_swigregister(*args, **kwargs): pass def delete_MArgParser(*args, **kwargs): pass def MItSelectionList_getDagPath(*args, **kwargs): pass def MFnSubd_levelMaxCurrent(*args, **kwargs): pass def MMessage_nodeCallbacks(*args, **kwargs): pass def delete_MPointArray(*args, **kwargs): pass def MDGModifier_commandToExecute(*args, **kwargs): pass def new_MFnNumericData(*args, **kwargs): pass def MFnMessageAttribute_type(*args, **kwargs): pass def MFnAttribute_setStorable(*args, **kwargs): pass def MDoubleArray___delitem__(*args, **kwargs): pass def MSelectionList_clear(*args, **kwargs): pass def MAngle_value(*args, **kwargs): pass def MItMeshVertex_getConnectedVertices(*args, **kwargs): pass def MPolyMessage_className(*args, **kwargs): pass def MFnEnumAttribute_defaultValue(*args, **kwargs): pass def MFnDependencyNode_typeName(*args, **kwargs): pass def MUserData_deleteAfterUse(*args, **kwargs): pass def MMessageNode_fServerPtr_get(*args, **kwargs): pass def 
MPlug_asShort(*args, **kwargs): pass
def MImage_writeToFile(*args, **kwargs): pass
def MFnMesh_clearBlindData(*args, **kwargs): pass
def MFnAttribute_isUsedAsColor(*args, **kwargs): pass
def MFnTripleIndexedComponent_setCompleteData(*args, **kwargs): pass
def MRichSelection_className(*args, **kwargs): pass
def MFnAmbientLight_ambientShade(*args, **kwargs): pass
def MSpace_swigregister(*args, **kwargs): pass
def MItMeshVertex_position(*args, **kwargs): pass
def new_MFnStringArrayData(*args, **kwargs): pass
def MFnDoubleIndexedComponent_getElement(*args, **kwargs): pass
def MURI_getHost(*args, **kwargs): pass
def MColor___sub__(*args, **kwargs): pass
def MPlug_setMTime(*args, **kwargs): pass
def MFnCamera_setVerticalShake(*args, **kwargs): pass
def
formatpost(): """ Formats the posted data into wanted format The data should be a list Currently only working for saol """ # get and parse data request.get_data() data = request.data try: data = json.loads(data) except ValueError as e: raise errors.KarpParsingError(str(e)) # set all allowed lexicons (to avoid authentication exception user_is_authorized, permitted = auth.validate_user(mode="read") # find the wanted format settings = parser.make_settings(permitted, {"size": 25}, user_is_authorized=user_is_authorized) parser.parse_extra(settings) to_format = settings.get("format", "") mode = parser.get_mode() _logger.debug('mode "%s"', mode) index, typ = conf_mgr.get_mode_index(mode) if to_format: if not isinstance(data, list): data = [data] errmsg = "Unkown format %s for mode %s" % (settings["format"], mode) format_list = conf_mgr.extra_src(mode, "format_list", helpers.notdefined(errmsg)) ok, html = format_list(data, conf_mgr.elastic(mode=mode), settings["format"], index) return jsonify({"all": len(data), "ok": ok, "data": html}) else: raise errors.KarpQueryError("Unkown format %s" % to_format) def autocomplete(): """ Returns lemgrams matching the query text. Each mode specifies in the configs which fields that should be considered. The parameter 'q' or 'query' is used when only one word form is to be processed. The parameter 'multi' is used when multiple word forms should be processed. The format of result depends on which flag that is set. """ user_is_authorized, permitted = auth.validate_user(mode="read") # query = request.query_string try: settings = parser.make_settings( permitted, {"size": 1000}, user_is_authorized=user_is_authorized ) mode = parser.get_mode() resource = parser.parse_extra(settings) if "q" in request.args or "query" in request.args: qs = [request.args.get("q", "") or request.args.get("query", "")] _logger.debug("qs is %s", qs) multi = False else: # check if there are multiple words forms to complete qs = settings.get("multi", []) _logger.debug("qs %s", qs) multi = True filters = [] # filters will be put here if not settings.get("user_is_authorized", False): filter_unauth_user = conf_mgr.filter_for_unauth_user(mode) if filter_unauth_user is not None: filters.append({"term": filter_unauth_user}) # use utf8, escape '"' qs = [re.sub('"', '\\"', q) for q in qs] headboost = conf_mgr.searchfield(mode, "boosts")[0] res = {} single_response = {} # if multi is not true, only one iteration of this loop will be done for q in qs: boost = {"term": {headboost: {"boost": "500", "value": q}}} autocompleteq = conf_mgr.extra_src(mode, "autocomplete", autocompletequery) exp = autocompleteq(mode, boost, q) autocomplete_field = conf_mgr.searchonefield(mode, "autocomplete_field") autocomplete_fields = conf_mgr.searchfield(mode, "autocomplete_field") fields = {"exists": {"field": autocomplete_field}} # last argument is the 'fields' used for highlightning elasticq = parser.search([exp, fields, resource], filters, "", usefilter=True) _logger.debug("Will send %s", elasticq) es = conf_mgr.elastic(mode=mode) _logger.debug("_source: %s", autocomplete_field) _logger.debug(elasticq) index, typ = conf_mgr.get_mode_index(mode) single_response = parser.adapt_query( settings["size"], 0, es, elasticq, {"size": settings["size"], "index": index, "_source": autocomplete_fields,}, ) # save the results for multi res[q] = single_response if multi: return jsonify(res) else: # single querys: only return the latest answer return jsonify(single_response) except AuthenticationError as e: _logger.exception(e) msg = 
e.message raise errors.KarpAuthenticationError(msg) except errors.KarpException as e: # pass on karp exceptions _logger.exception(e) raise except Exception as e: # catch *all* exceptions _logger.exception(e) raise errors.KarpGeneralError("Unknown error", debug_msg=e, query=request.query_string) # standard autocomplete def autocompletequery(mode, boost, q): """ Constructs an autocompletion query, searching for lemgrams starting with 'text' Returns a query object to be sent to elastic search """ # other modes: don't care about msd look_in = [boost] for boost_field in conf_mgr.searchfield(mode, "boosts"): look_in.append({"match_phrase": {boost_field: q}}) exp = {"bool": {"should": look_in}} return exp def clean_highlight(ans): stop_offset = 9 # The number of extra tokens added by the <em> tags for n, hit in enumerate(ans.get("hits", {}).get("hits", [])): # _logger.debug('CLEAN hit %s\n\n\n' % hit) for field, texts in hit.get("highlight", {}).items(): # _logger.debug('CLEAN texts %s: %s' % (field, texts)) if field == "lexiconName": del ans["hits"]["hits"][n]["highlight"][field] else: newtexts = chain(*[re.finditer("<em>(.*?)</em>", t) for t in texts]) spans = [] for new in newtexts: spans.append((new.group(1), new.span()[0], new.span()[1] - stop_offset)) ans["hits"]["hits"][n]["highlight"][field] = spans ans["hits"]["highlight"] = "ON" def lexiconorder(): orderlist = {} for name, val in conf_mgr.lexicons.items(): orderlist[name] = val.get("order", "-1") return jsonify(orderlist) def modeinfo(mode): return jsonify(conf_mgr.fields.get(mode, {})) def lexiconinfo(lexicon): return jsonify(conf_mgr.fields.get(conf_mgr.get_lexicon_mode(lexicon), {})) # For debugging def testquery(): """ Returns the query expressed in elastics search api """ user_is_authorized, permitted = auth.validate_user(mode="read") try: # default settings = parser.make_settings( permitted, {"size": 25, "page": 0}, user_is_authorized=user_is_authorized ) elasticq = parser.parse(settings) mode = settings["mode"] if not settings.get("sort", ""): # default: group by lexicon, then sort by score sort = conf_mgr.searchfield(mode, "sort_by") else: sort = settings["sort"] start = settings["start"] if "start" in settings else settings["page"] * settings["size"] elasticq = parser.parse(settings) return json.dumps(elasticq) + json.dumps( {"sort": sort, "_from": start, "size": settings["size"], "version": "true"} ) except Exception as e: # catch *all* exceptions # TODO only catch relevant exceptions _logger.exception(e) raise errors.KarpGeneralError(e, request.query_string) def get_context(lexicon): """ Find and return the alphabetically (or similar, as specified for the lexicon) context of a word/entry. """ user_is_authorized, permitted = auth.validate_user(mode="read") if lexicon not in permitted: raise errors.KarpAuthenticationError( "You are not allowed to search the " "lexicon %s" % lexicon ) # make default settings settings = parser.make_settings( permitted, {"size": 10, "resource": lexicon}, user_is_authorized=user_is_authorized ) # parse parameter settings parser.parse_extra(settings) # set searching configurations mode = conf_mgr.get_lexicon_mode(lexicon) settings["mode"] = mode es = conf_mgr.elastic(mode=mode) index, typ = conf_mgr.get_mode_index(mode) # get the sort_by list (eg. 
['baseform.sort', 'lemmaid.search']) # leave out lexiconOrder and _score sortfieldnames = [ field for field in conf_mgr.searchconf(mode, "sort_by") if field not in ["_score", "lexiconOrder"] ] _logger.debug("|get_context| sortfieldnames = %s", sortfieldnames) # get the sort field paths (eg. ['FormRep.baseform.raw', 'lemmaid.raw']) # Used for sorting. sortfield = sum([conf_mgr.lookup_multiple(f, mode) for f in sortfieldnames], []) # get the field name of the head sort field. Used for searching sortfieldname = sortfieldnames[0] filters = [] # filters will be put here print(f"mode = {mode}") if not settings.get("user_is_authorized", False): filter_unauth_user = conf_mgr.filter_for_unauth_user(mode) print(f"filter_unauth_user = {filter_unauth_user}") if filter_unauth_user is not None: filters.append({"term": filter_unauth_user}) print(f"filters = {filters}") # find the center entry (by its id) if "center" in settings: center_id = settings["center"] id_query = {"term": {"_id": center_id}} if filters: body_query = {"query": {"bool": {"must": id_query, "filter": filters}}} else: body_query = {"query": id_query} lexstart = es.search( index=index, doc_type=typ, size=1, body=body_query, sort=["%s:asc" % f for f in sortfield], ) # if no center id is given, pick the first entry of the lexicon else: exps = [] parser.parse_ext("and|resource|equals|%s" % lexicon, exps, [], mode) center_q = parser.search(exps, filters, [], usefilter=True, constant_score=False) center_q = {"query": center_q} lexstart = es.search( index=index, doc_type=typ, size=1, body=center_q, sort=["%s:asc" % f for f in sortfield], ) center_id = lexstart["hits"]["hits"][0]["_id"] # lexstart = es.search(index=index, doc_type=typ, size=1, # sort=['%s:asc' % f for f in sortfield]) # center_id = lexstart['hits']['hits'][0]['_id'] if not lexstart["hits"]["hits"]: _logger.error("No center found %s, %s", center_id, lexstart) raise errors.KarpElasticSearchError("Could not find entry %s" % center_id) centerentry = lexstart["hits"]["hits"][0] _logger.debug("center %s, %s", centerentry, centerentry["_id"]) origentry_sort = [key for key in centerentry["sort"] if key is not None][0] # TODO what to do if the sort key is not in the lexicon? as below? # origentry_sort = centerentry['sort'][0] # TODO test this! 
if not isinstance(origentry_sort, str): origentry_sort = str(origentry_sort) sortvalue = util.escape_control(origentry_sort) _logger.debug("Orig entry escaped key %s", sortvalue) # Construct queries to ES exps = [] # the query string from the user querystring = settings.get("q", "") parser.parse_ext("and|resource|equals|%s" % lexicon, exps, [], mode) if querystring: if querystring.startswith("simple"): querystring = "and|anything|equals|%s" % querystring.split("|")[-1] else: querystring = re.sub(r"extended\|\|", "", querystring) parser.parse_ext(querystring, exps, [], mode) preexps = copy.deepcopy(exps) # deep copy for the pre-query hits_post = get_pre_post( exps, center_id, sortfield, sortfieldname, sortvalue, mode, settings["size"], es, index, place="post", filters=filters, ) hits_pre = get_pre_post( preexps, center_id, sortfield, sortfieldname, sortvalue, mode, settings["size"], es, index, place="pre", filters=filters, ) return jsonify( { "pre": hits_pre[: settings["size"]], "post": hits_post[: settings["size"]], "center": centerentry, } ) def get_pre_post( exps, center_id, sortfield, sortfieldname, sortvalue, mode, size: int, es, index, place="post", filters=None, ): op = {"post": {"op": "gte", "sort": "asc"}, "pre": {"op": "lte", "sort": "desc"}} parser.parse_ext("and|%s|%s|%s" % (sortfieldname, op[place]["op"], sortvalue), exps, [], mode) elasticq_q = parser.search( exps, filters if filters else [], [], usefilter=False, constant_score=True ) # +1 to compensate for the word itself being in the context size = size + 1 show = conf_mgr.searchfield(mode, "minientry_fields") for _i, _v in enumerate(show): if _v == "Corpus_unit_id.raw": show[_i] = "Corpus_unit_id" _logger.debug("searching.py:get_pre_post show = %s", show) # TODO size*3 (magic number) because many entries may have the same
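As a reading aid for the autocomplete code above, the sketch below reproduces the bool/should body that autocompletequery() assembles, using plain dicts. The two field names passed in are hypothetical stand-ins for whatever conf_mgr.searchfield(mode, "boosts") returns for a given mode, so treat this as an illustration of the query shape rather than the service's actual configuration.

import json

def sketch_autocomplete_query(boost_fields, q):
    # Head boost: a heavily weighted exact term match on the first boost
    # field, mirroring the boost dict built inside autocomplete().
    boost = {"term": {boost_fields[0]: {"boost": "500", "value": q}}}
    look_in = [boost]
    # Phrase matches on every boost field, as in autocompletequery().
    for boost_field in boost_fields:
        look_in.append({"match_phrase": {boost_field: q}})
    return {"bool": {"should": look_in}}

# Example with made-up field names:
print(json.dumps(sketch_autocomplete_query(["baseform", "lemgram"], "katt"),
                 indent=2, ensure_ascii=False))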
<filename>src/db.py #################################################################################################### ### ### ### Functions for database handling (fetching, ...) ### ### Author: <NAME> (EPFL) ### ### Last modified: 03.09.2021 ### ### ### #################################################################################################### # Import libraries import numpy as np import os import subprocess as sbp import time # Import local libraries import graph as gr def fetch_entries(db_root, elem, atoms, envs, Gs, max_w, N_min=10, nei_elem=None, exclude=None, verbose=False): """ Find the database entries corresponding to each graph, with a minimum number of instances. Also retrieve the crystal identifier and index of the atom associated with each database entry. Inputs: - db_root Root directory of the database - elem Element of the central nodes of the graphs - elems List of atoms in the molecule - envs Environment of each graph (first coordination shell) - Gs List of graphs to fetch the database for - max_w Maximum depth - N_min Minimum number of entries in the database required - nei_elem "None" if we only want to retrieve the shifts of the central atom, otherwise element of the neighbour to extract shift distributions from - exclude List of crystal identifiers to exclude - verbose Whether additional information about the search should be printed Outputs: - all_shifts List of predicted shifts for each graph - all_errs List of prediction errors for each graph - ws List of maximum depth for each graph - labels List of graph labels - all_crysts List of the crystals corresponding to the shifts extracted - all_inds List of the indices of the atoms corresponding to the shifts extracted - hashes List of hashes identifying the graphs """ # Initialize arrays all_shifts = [] all_errs = [] ws = [] labels = [] all_crysts = [] all_inds = [] hashes = [] # Get the directory db_dir = db_root + elem + "/" # Loop over each graph for i, (G, env) in enumerate(zip(Gs, envs)): start = time.time() # Get the number of neighbouring elements in the environment num_nei = 0 if nei_elem is not None: nei_elems = env.split("-") num_nei = nei_elems.count(nei_elem) # Get the directory db_dir = db_root + elem + "-" + nei_elem + "/" # Check if database directory exists if not os.path.exists(db_dir): raise ValueError("Directory does not exist: {}".format(db_dir)) # If there are neighbours that correspond to the element, extract the 2D shifts if num_nei > 0: if not os.path.exists(db_dir + env + ".csv"): raise ValueError("File does not exist: {}".format(db_dir + env + ".csv")) # Loop over all neighbours for j in range(1, len(nei_elems)+1): if G.nodes[j]["elem"] == nei_elem: this_w = max_w # Generate arborescence (array of hashes) arb = [] for w in range(2, max_w+1): cut_G = gr.cut_graph(G, w) cut_G.nodes[j]["elem"] = "Z" arb.append(gr.generate_hash(cut_G)) # If the arborescence was already found before, get the corresponding shifts directly if ",".join(arb) in hashes: h_ind = hashes.index(",".join(arb)) hashes.append(",".join(arb)) this_w = ws[h_ind] # Append the array of shifts and errors for this distribution all_shifts.append(all_shifts[h_ind]) all_errs.append(all_errs[h_ind]) ws.append(this_w) labels.append("{}{}-{}{}".format(elem, i+1, nei_elem, atoms[:G.nodes[j]["ind"]].count(nei_elem)+1)) all_inds.append(all_inds[h_ind]) all_crysts.append(all_crysts[h_ind]) # Otherwise, search through the database else: hashes.append(",".join(arb)) # Initialize array of shifts and errors shifts = [] errs = [] inds = 
[] crysts = [] # Get the entries of the corresponding graph p = sbp.Popen(["grep", ",".join(arb), db_dir + env + ".csv"], stdout=sbp.PIPE) out, err = p.communicate() out = out.decode("UTF-8") for l in out.split("\n"): if len(l) > 0: tmp = l.split(",") if (exclude is None or tmp[0] not in exclude) and tmp[0] != "crystal": crysts.append(tmp[0]) inds.append([int(tmp[1]), int(tmp[4])]) shifts.append([float(tmp[2]), float(tmp[5])]) errs.append([float(tmp[3]), float(tmp[6])]) # If there is not enough entries, reduce the depth and try again while len(shifts) < N_min: if verbose: print(" w = {}: {} instances are not enough, reducing graph depth...".format(this_w, len(shifts))) shifts = [] errs = [] inds = [] crysts = [] # Update the depth and the corresponding arborescence this_w -= 1 arb = arb[:-1] # Get the entries of the corresponding graph p = sbp.Popen(["grep", ",".join(arb), db_dir + env + ".csv"], stdout=sbp.PIPE) out, err = p.communicate() out = out.decode("UTF-8") for l in out.split("\n"): if len(l) > 0: tmp = l.split(",") if (exclude is None or tmp[0] not in exclude) and tmp[0] != "crystal": shifts.append([float(tmp[2]), float(tmp[5])]) errs.append([float(tmp[3]), float(tmp[6])]) inds.append([int(tmp[1]), int(tmp[4])]) crysts.append(tmp[0]) # Append the array of shifts and errors for this distribution all_shifts.append(np.array(shifts)) all_errs.append(np.array(errs)) ws.append(this_w) labels.append("{}{}-{}{}".format(elem, i+1, nei_elem, atoms[:G.nodes[j]["ind"]].count(nei_elem)+1)) all_inds.append(inds) all_crysts.append(crysts) stop = time.time() print(" Graph {}/{} found. w = {}, {} instances. Time elapsed: {:.2f} s".format(i+1, len(Gs), this_w, len(all_shifts[-1]), stop-start)) # If the neighbouring element is not set, extract the 1D shfits elif nei_elem is None: this_w = max_w # Generate arborescence (array of hashes) arb = [] for w in range(2, max_w+1): cut_G = gr.cut_graph(G, w) arb.append(gr.generate_hash(cut_G)) # If the arborescence was already found, reuse the previously extracted shifts to save time if ",".join(arb) in hashes: h_ind = hashes.index(",".join(arb)) hashes.append(",".join(arb)) this_w = ws[h_ind] # Append the array of shifts and errors for this distribution all_shifts.append(all_shifts[h_ind]) all_errs.append(all_errs[h_ind]) ws.append(this_w) labels.append("{}{}".format(elem, i+1)) all_inds.append(all_inds[h_ind]) all_crysts.append(all_crysts[h_ind]) else: hashes.append(",".join(arb)) # Initialize array of shifts, errors, crystal structures and atomic indices shifts = [] errs = [] crysts = [] inds = [] # Get the entries of the corresponding graph p = sbp.Popen(["grep", ",".join(arb), db_dir + env + ".csv"], stdout=sbp.PIPE) out, err = p.communicate() out = out.decode("UTF-8") for l in out.split("\n"): if len(l) > 0: tmp = l.split(",") if (exclude is None or tmp[0] not in exclude) and tmp[0] != "crystal": crysts.append(tmp[0]) inds.append(int(tmp[1])) shifts.append(float(tmp[2])) errs.append(float(tmp[3])) # If there is not enough entries, reduce the depth and try again while len(shifts) < N_min: if verbose: print(" w = {}: {} instances are not enough, reducing graph depth...".format(this_w, len(shifts))) shifts = [] errs = [] inds = [] crysts = [] # Reduce the depth and update the arborescence this_w -= 1 arb = arb[:-1] # Get the entries of the corresponding graph p = sbp.Popen(["grep", ",".join(arb), db_dir + env + ".csv"], stdout=sbp.PIPE) out, err = p.communicate() out = out.decode("UTF-8") for l in out.split("\n"): if len(l) > 0: tmp = l.split(",") if 
(exclude is None or tmp[0] not in exclude) and tmp[0] != "crystal": crysts.append(tmp[0]) inds.append(int(tmp[1])) shifts.append(float(tmp[2])) errs.append(float(tmp[3])) # Append the array of shifts and error for this distribution all_shifts.append(np.array(shifts)) all_errs.append(np.array(errs)) ws.append(this_w) labels.append("{}{}".format(elem, i+1)) all_inds.append(inds) all_crysts.append(crysts) stop = time.time() print(" Graph {}/{} found. w = {}, {} instances. Time elapsed: {:.2f} s".format(i+1, len(Gs), this_w, len(all_shifts[-1]), stop-start)) else: print(" Graph {}/{} has no neighbouring {}.".format(i+1, len(Gs), nei_elem)) return all_shifts, all_errs, ws, labels, all_crysts, all_inds, hashes def fetch_entries_from_hashes(db_root, elem, envs, Hs, max_w, N_min=10, nei_elem=None, exclude=None, verbose=False): """ Find the database entries corresponding to each graph, with a minimum number of instances. Also retrieve the crystal identifier and index of the atom associated with each database entry. Inputs: - db_root Root directory of the database - elem Element of the central nodes of the graphs - envs Environment of each graph (first coordination shell) - Hs List of graphs hashes to fetch the database for - w_max Maximum depth - N_min Minimum number of entries for each graph - nei_elem "None" if we only want to retrieve the shifts of the central atom, otherwise element of the neighbour to extract shift distributions from - exclude List of crystal identifiers to exclude - verbose Whether additional information about the search should be printed Outputs: - all_shifts List of predicted shifts for each graph - all_errs List of prediction errors for each graph - ws List of maximum depth for each graph - all_crysts List of the crystals corresponding to the shifts extracted - all_inds List of the indices of the atoms corresponding to the shifts extracted """ # Initialize arrays all_shifts = [] all_errs = [] ws = [] labels = [] all_crysts = [] all_inds = [] hashes = [] # Get the directory
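The retry-with-reduced-depth pattern that both fetch_entries() and fetch_entries_from_hashes() build around their grep calls can be summarised on its own. The following is a minimal sketch, assuming a hypothetical CSV laid out like the one parsed above (crystal identifier in column 0, shift in column 2, error in column 3, with the comma-joined arborescence appearing somewhere in each row) and matching rows by substring the way the grep subprocess does; it is illustrative, not the project's API.

def lookup_with_fallback(csv_path, arb, n_min=10):
    """Return (shifts, errs, depth) for the deepest arborescence prefix of
    `arb` (a list of hashes, shallowest depth first) with >= n_min entries."""
    this_w = len(arb) + 1  # depths run from w = 2, so len(arb) = max_w - 1
    while arb:
        key = ",".join(arb)
        shifts, errs = [], []
        with open(csv_path) as f:
            for line in f:
                if key in line:
                    tmp = line.rstrip("\n").split(",")
                    if tmp[0] != "crystal":  # skip the header row
                        shifts.append(float(tmp[2]))
                        errs.append(float(tmp[3]))
        if len(shifts) >= n_min:
            return shifts, errs, this_w
        # Not enough instances: drop the deepest hash and search a shallower graph.
        arb = arb[:-1]
        this_w -= 1
    return [], [], this_w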
(content_id is not None and grade is not None): # i.e. 0 attempts - date_id = date_str[0:len(date_str)-4] post_date = datetime.datetime.strptime(date_id, "%Y-%m-%d %H:%M:%S") #2014-07-11 16:52:53 EST unixtimestamp = time.mktime(post_date.timetuple()) date_id_index = datetime.datetime.strftime(post_date, "%d-%b-%y") #time.strftime("%d-%b-%y", cur_visit_date) time_id = datetime.datetime.strftime(post_date, "%H:%M:%S") attempt_row = {"course_id": course_id, "content_id": content_id, "grade": grade, "user_id": user_id, "unixtimestamp": unixtimestamp} save_summaryobject ("dim_submissionattempts", attempt_row) if content_link_id is not None: content_link_id_to_content_id_dict[content_link_id] = content_id #save all attempts as pageviews in fact_coursevisits user_pk = str(course_id) + "_" + user_id page_pk = str(course_id) + "_" + content_id fact_row = {"date_id": date_id_index, "time_id": time_id, "course_id": course_id, "datetime": unixtimestamp, "user_id": user_id, "module": 'assessment/x-bb-qti-test', "action": 'COURSE_ACCESS', "page_id": content_id, "pageview":1, "user_pk": user_pk, "page_pk": page_pk, "unixtimestamp": unixtimestamp} save_summaryobject ("fact_coursevisits", fact_row) return content_link_id_to_content_id_dict def get_contenthandle_blackboard(filepath): tree = ET.ElementTree(file=filepath) root = tree.getroot() content_handle = "" for elem in tree.iter(tag='INTERNALHANDLE'): content_handle = elem.attrib["value"] return content_handle def get_actual_contenttype_blackboard(filepath): tree = ET.ElementTree(file=filepath) root = tree.getroot() content_type = "" for elem in tree.iter(tag='CONTENTHANDLER'): content_type = elem.attrib["value"] return content_type def process_blackboard_memberships(filepath, course_id): tree = ET.ElementTree(file=filepath) root = tree.getroot() member_to_user_dict = {} for elem in tree.iter(tag='COURSEMEMBERSHIP'): member_id = elem.attrib["id"] member_id = member_id[1:-2] user_id = 0 for usr in elem: if usr.tag == "USERID": user_id = usr.attrib["value"] user_id = user_id[1:-2] member_to_user_dict[member_id] = user_id return member_to_user_dict def process_blackboard_test(filepath, content_id, title, course_id, content_type): timeopen = 0 timeclose = 0 grade = 0.0 content_id = content_id[1:-2] tree = ET.ElementTree(file=filepath) root = tree.getroot() for elem in tree.iter(tag='qmd_absolutescore_max'): grade = elem.text submissiontype_row = {"course_id": course_id, "content_id": content_id, "content_type": content_type, "timeopen": timeopen, "timeclose": timeclose, "grade": grade} save_summaryobject ("dim_submissiontypes", submissiontype_row) def process_blackboard_conferences(filepath, content_id, title, course_id): # Save Forum = Conferences entry forum_row = {"course_id": course_id, "forum_id": content_id, "title": "Conferences", "no_discussions": 0} save_summaryobject ("summary_forum", forum_row) tree = ET.ElementTree(file=filepath) root = tree.getroot() conf_id = "" title = "" for elem in tree.iter(tag='CONFERENCE'): conf_id = elem.attrib["id"] conf_id = conf_id[1:len(conf_id)-2] for child_of_root in elem: if child_of_root.tag == "TITLE": title = child_of_root.attrib["value"] # Save Forum - Discussion Board discussion_row = {"course_id": course_id, "forum_id": conf_id, "discussion_id": content_id, "title": title.replace("'", "''"), "no_posts": 0} save_summaryobject ("summary_discussion", discussion_row) def process_blackboard_forum(filepath, content_id, title, course_id): global msg_to_forum_dict tree = ET.ElementTree(file=filepath) root = 
tree.getroot() forum_id = root.attrib['id'] forum_id = forum_id[1:len(forum_id)-2] conf_id = "" title = "" for elem in root: if elem.tag == "CONFERENCEID": conf_id = elem.attrib["value"] conf_id = conf_id[1:len(conf_id)-2] if elem.tag == "TITLE": title = elem.attrib["value"] # Get all posts for msg in tree.iter(tag='MSG'): post_id = msg.attrib['id'][1:-2] post_title = "" date_id = "" user_id = "" for elem in msg: if elem.tag == "TITLE": post_title = elem.attrib["value"] if elem.tag == "USERID": user_id = elem.attrib["value"] for subelem in elem: if subelem.tag == "CREATED": date_id = subelem.attrib["value"] if (date_id is not None) and (len(date_id)!=0) and (date_id!=" ") and (date_id!=""): date_id = date_id[0:len(date_id)-4] post_date = datetime.datetime.strptime(date_id, "%Y-%m-%d %H:%M:%S") #2014-07-11 16:52:53 EST date_id = datetime.datetime.strftime(post_date, "%d-%b-%y") user_id = user_id[1:len(user_id)-2] post_row = {"date_id": date_id, "user_id": user_id, "course_id": course_id, "forum_id": forum_id, "discussion_id": conf_id} save_summaryobject ("summary_posts", post_row) def populate_summary_contentaggregatesbycourse_table(resource_type_dict, course_id): for key in resource_type_dict: row = {"contenttype": key, "count": resource_type_dict[key], "course_id": course_id} save_summaryobject ("summary_contentaggregatesbycourse", row) def getID_fromResourceFile(resource_file, type=None): id = '0' tree = ET.ElementTree(file=resource_file) root = tree.getroot() if type == "assessment/x-bb-qti-test": for elem in tree.iter(tag='assessmentmetadata'): for elem_elem in elem: if elem_elem.tag == "bbmd_asi_object_id": id = elem_elem.text else: if "id" in root.attrib: id = root.attrib["id"] elif root.tag == "questestinterop": for elem in tree.iter(tag='assessmentmetadata'): for elem_elem in elem: if elem_elem.tag == "bbmd_asi_object_id": id = elem_elem.text else: id = '0' return id """ Ingest Moodle Data """ def populate_dim_users_table_moodle(user_membership_resourcefile, course_id, course_type): """ Extracts users from the course export files and inserts into the dim_users table Todo: Update to include all roles not only students Args: user_membership_resourcefile: file with the list of users course_id: course id course_type: Moodle or MoodleMacquarie """ print "populate_dim_users_table_moodle" , course_type role_skip_list = [] if course_type=="Moodle": role_skip_list = ["Staff","None", None] tree = ET.ElementTree(file=user_membership_resourcefile) root = tree.getroot() firstname = "" lastname = "" role = "" username = "" email = "" lms_id = "" global staff_list trans = connection.begin() for child_of_root in root: lms_id = child_of_root.attrib["id"] for child_child_of_root in child_of_root: if (child_child_of_root.tag== "firstname"): firstname = child_child_of_root.text if (child_child_of_root.tag== "lastname"): lastname = child_child_of_root.text if (child_child_of_root.tag== "department"): role = child_child_of_root.text if (child_child_of_root.tag== "username"): username = child_child_of_root.text if (child_child_of_root.tag== "email"): email = child_child_of_root.text user_pk = str(course_id) + "_" + lms_id if (firstname is None) or (len(firstname) == 0): firstname = "blank" else: firstname = firstname.replace("'", "''") if (lastname is None) or (len(lastname) == 0): lastname = "blank" else: lastname = lastname.replace("'", "''") if (email is None) or (len(email) == 0): email = "blank" if course_type=="Moodle": if (role not in ["Staff","None", None]): staff_list.append(int(lms_id)) row = 
{"lms_id": lms_id, "firstname": firstname, "lastname": lastname, "username": username, "email": email, "role": role, "user_pk": user_pk, "course_id": course_id} save_summaryobject ("dim_users", row) elif course_type=="MoodleMacquarie": if "students" in email: staff_list.append(int(lms_id)) role = "Student" row = {"lms_id": lms_id, "firstname": firstname, "lastname": lastname, "username": username, "email": email, "role": role, "user_pk": user_pk, "course_id": course_id} save_summaryobject ("dim_users", row) trans.commit() def build_starschema_moodle(course_log_file, course_id, content_type, forum_id): """ Extracts logs from the Moodle export format and inserts as a row in the fact_coursevisits table. """ global staff_list tree = ET.ElementTree(file=course_log_file) root = tree.getroot() datetime_str = "" lms_user_id = "" module = "" action = "" url = "" info = "" section_order = 0 count = 1 access_sql = "BEGIN TRANSACTION;" trans = connection.begin() for child_of_root in root: count +=1 for child_child_of_root in child_of_root: if (child_child_of_root.tag== "time"): datetime_str = child_child_of_root.text if (child_child_of_root.tag== "userid"): lms_user_id = child_child_of_root.text if (child_child_of_root.tag== "module"): module = child_child_of_root.text if (child_child_of_root.tag== "action"): action = child_child_of_root.text if (child_child_of_root.tag== "url"): url = child_child_of_root.text if (child_child_of_root.tag== "info"): info = child_child_of_root.text date_id = time.strftime("%d-%b-%y", time.gmtime(int(datetime_str))) #datetime.datetime.strptime(arow["TIMESTAMP"], "%d-%b-%y") time_id = time.strftime("%H:%M:%S", time.gmtime(int(datetime_str))) session_id = 0 page_id = 0 section_id = 0 if (not((url is None) or (len(url) == 0))): parsed_url = urlparse.urlparse(url) query_as_dict = urlparse.parse_qs(parsed_url.query) if "id" in query_as_dict: page_id = query_as_dict["id"][0] if "sectionid" in query_as_dict: section_id = query_as_dict["sectionid"][0] if "section" in query_as_dict: section_order = int(query_as_dict["section"][0])-1 # I think the numbers are added by 1 here if (module in ["forum", "wiki"]): page_id = forum_id #info user_pk = str(course_id) + "_" + lms_user_id page_pk = str(course_id) + "_" + str(page_id) section_pk = str(course_id) + "_" + str(section_id) fmt = '%d-%b-%y %H:%M:%S' dt = datetime.datetime.strptime(date_id + " " + time_id, fmt) unixtimestamp = time.mktime(dt.timetuple()) if (info is None) or (len(info) == 0): info = "-" else: info = info.replace("'", "''") info = info.replace("%", "\%") #escape % sign if (url is None) or (len(url) == 0): url = "" else: url = info.replace("%", "\%") #escape % sign row = {} if (lms_user_id not in staff_list): if (module not in ['label', 'role', 'unisa_module', 'wizard']): if ((action not in ['add mod', 'update mod', 'editsection', 'enrol', 'unenrol', 'report log', 'loginas'])): if section_order > 0: row = {"date_id": date_id, "time_id": time_id, "course_id": course_id, "datetime": datetime_str, "user_id": lms_user_id, "module": module, "action": action, "url": url, "page_id": page_id, "section_id": section_id, "section_order": section_order , "pageview": 1, "user_pk": user_pk, "page_pk": page_pk, "section_pk": section_pk, "unixtimestamp": unixtimestamp, "info": info} else: row = {"date_id": date_id, "time_id": time_id, "course_id": course_id, "datetime": datetime_str, "user_id": lms_user_id, "module": module, "action": action, "url": url, "page_id": page_id, "section_id": section_id, "pageview": 1, "user_pk": user_pk, 
"page_pk": page_pk, "section_pk": section_pk, "unixtimestamp": unixtimestamp, "info": info} connection.execute(return_summaryobjectsql("fact_coursevisits", row)) trans.commit() def build_starschema_moodlemacquarie(filepath, course_id): """ Extracts csv log from the Moodle Macquarie and inserts as a row in the fact_coursevisits table. """ print "build_starschema_moodlemacquarie" global staff_list action_skip_list = ['updated', 'unassigned', 'assigned', 'deleted', 'updated', 'loggedinas'] print action_skip_list trans = connection.begin() count = 0 header = None with open(filepath, 'rb') as f: reader = csv.reader(f) for row in reader: count = count + 1 if (header == None): header = row continue arow = {} for header_index in range (0, len(header)): arow[(header[header_index])] = row[header_index] # Process row # Headings id eventname component action target objecttable objectid crud edulevel contextid # contextlevel contextinstanceid userid courseid relateduserid anonymous other # timecreated origin ip realuserid log_id = arow["id"] eventname = arow["eventname"] component = arow["component"] action = arow["action"] target = arow["target"] objecttable = arow["objecttable"] #page_id = arow["objectid"] crud = arow["crud"] edulevel = arow["edulevel"] contextid = arow["contextid"] contextlevel = arow["contextlevel"] page_id = arow["contextinstanceid"] lms_user_id = arow["userid"] relateduserid = arow["relateduserid"] anonymous = arow["anonymous"]
0}, '>14524152': {'str': 0, 'sma': 9, 'larj': 2, 'weaj': 2, '345': 0, '3t4': 0, 'bl4': 0, 'bl1': 5, 'bl5': 0, 'pa1': 0}, '>14525342': {'str': 2, 'sma': 6, 'larj': 4, 'weaj': 2, '345': 0, '3t4': 1, 'bl4': 0, 'bl1': 0, 'bl5': 0, 'pa1': 0}, '>14525452': {'str': 2, 'sma': 6, 'larj': 4, 'weaj': 2, '345': 0, '3t4': 0, 'bl4': 0, 'bl1': 0, 'bl5': 0, 'pa1': 0}, '>14535452': {'str': 2, 'sma': 2, 'larj': 5, 'weaj': 2, '345': 0, '3t4': 0, 'bl4': 0, 'bl1': 0, 'bl5': 0, 'pa1': 0}, '>13415342': {'str': 4, 'sma': 5, 'larj': 6, 'weaj': 2, '345': 0, '3t4': 2, 'bl4': 0, 'bl1': 0, 'bl5': 0, 'pa1': 0}, }, 'F': { '>124251': {'str': 2, 'sma': 2, 'larj': 3, 'weaj': 1, '345': 0, '3t4': 0, 'bl4': 0, 'bl1': 0, 'bl5': 0, 'pa1': 0}, '>123151': {'str': 2, 'sma': 2, 'larj': 2, 'weaj': 0, '345': 0, '3t4': 0, 'bl4': 0, 'bl1': 5, 'bl5': 0, 'pa1': 0}, '>123251': {'str': 6, 'sma': 2, 'larj': 7, 'weaj': 0, '345': 0, '3t4': 0, 'bl4': 0, 'bl1': 0, 'bl5': 0, 'pa1': 0}, '>124151': {'str': 0, 'sma': 3, 'larj': 0, 'weaj': 1, '345': 0, '3t4': 0, 'bl4': 0, 'bl1': 5, 'bl5': 0, 'pa1': 0}, '>135251': {'str': 2, 'sma': 4, 'larj': 3, 'weaj': 0, '345': 0, '3t4': 0, 'bl4': 0, 'bl1': 0, 'bl5': 0, 'pa1': 0}, '>125251': {'str': 2, 'sma': 6, 'larj': 3, 'weaj': 0, '345': 0, '3t4': 0, 'bl4': 0, 'bl1': 0, 'bl5': 0, 'pa1': 0}, '>235251': {'str': 2, 'sma': 4, 'larj': 4, 'weaj': 0, '345': 0, '3t4': 0, 'bl4': 0, 'bl1': 0, 'bl5': 0, 'pa1': 0}, '>123141': {'str': 2, 'sma': 0, 'larj': 2, 'weaj': 1, '345': 0, '3t4': 0, 'bl4': 0, 'bl1': 5, 'bl5': 0, 'pa1': 0}, '>124141': {'str': 0, 'sma': 1, 'larj': 0, 'weaj': 2, '345': 0, '3t4': 0, 'bl4': 0, 'bl1': 5, 'bl5': 0, 'pa1': 0}, '>134251': {'str': 6, 'sma': 2, 'larj': 5, 'weaj': 1, '345': 0, '3t4': 1, 'bl4': 0, 'bl1': 0, 'bl5': 0, 'pa1': 0}, }, 'G': { '>3432321': {'str': 0, 'sma': 0, 'larj': 0, 'weaj': 1, '345': 0, '3t4': 1, 'bl4': 0, 'bl1': 0, 'bl5': 0, 'pa1': 0}, '>2321321': {'str': 0, 'sma': 1, 'larj': 0, 'weaj': 0, '345': 0, '3t4': 0, 'bl4': 0, 'bl1': 0, 'bl5': 0, 'pa1': 0}, '>3431321': {'str': 0, 'sma': 2, 'larj': 0, 'weaj': 1, '345': 0, '3t4': 1, 'bl4': 0, 'bl1': 0, 'bl5': 0, 'pa1': 0}, '>4542321': {'str': 0, 'sma': 2, 'larj': 0, 'weaj': 2, '345': 0, '3t4': 0, 'bl4': 0, 'bl1': 0, 'bl5': 0, 'pa1': 0}, '>2432321': {'str': 0, 'sma': 4, 'larj': 0, 'weaj': 1, '345': 0, '3t4': 0, 'bl4': 0, 'bl1': 0, 'bl5': 0, 'pa1': 0}, '>4532321': {'str': 0, 'sma': 4, 'larj': 0, 'weaj': 1, '345': 0, '3t4': 0, 'bl4': 0, 'bl1': 0, 'bl5': 0, 'pa1': 0}, '>2132321': {'str': 0, 'sma': 6, 'larj': 0, 'weaj': 0, '345': 0, '3t4': 0, 'bl4': 0, 'bl1': 0, 'bl5': 0, 'pa1': 0}, '>2321421': {'str': 0, 'sma': 5, 'larj': 0, 'weaj': 1, '345': 0, '3t4': 0, 'bl4': 0, 'bl1': 0, 'bl5': 0, 'pa1': 0}, '>2321431': {'str': 0, 'sma': 4, 'larj': 0, 'weaj': 1, '345': 0, '3t4': 0, 'bl4': 1, 'bl1': 0, 'bl5': 0, 'pa1': 0}, '>3132321': {'str': 0, 'sma': 8, 'larj': 0, 'weaj': 0, '345': 0, '3t4': 0, 'bl4': 0, 'bl1': 0, 'bl5': 0, 'pa1': 0}, '>1321321': {'str': 0, 'sma': 3, 'larj': 0, 'weaj': 0, '345': 0, '3t4': 0, 'bl4': 0, 'bl1': 3, 'bl5': 0, 'pa1': 0}, '>3432431': {'str': 0, 'sma': 3, 'larj': 0, 'weaj': 2, '345': 0, '3t4': 1, 'bl4': 1, 'bl1': 0, 'bl5': 0, 'pa1': 0}, } } subcost_names = ('str', 'sma', 'larj', 'pcc', 'pcs', 'weaj', '345', '3t4', 'bl4', 'bl1', 'bl5', 'pa1') solutions = { 'A': { 6: {'>24342313': True}, 7: {'>13231213': True}, 8: {'>14342313': True, '>35453423': True}, 9: {'>13231323': True, '>13231423': True, '>13242313': True}, 10: {'>12142313': True, '>13232313': True, '>24342323': True} }, 'B': { 7: {'>3124': True, '>3214': True, '>4235': True}, 
11: {'>4215': True}, 14: {'>1235': True, '>2124': True, '>3125': True, '>3215': True}, 15: {'>3235': True, '>4125': True} }, 'C': { 9: {'>21241': True, '>21251': True}, 10: {'>31241': True, '>31251': True, '>32351': True, '>42351': True}, 11: {'>31351': True, '>42151': True}, 12: {'>21351': True, '>32151': True} }, 'D': { 12: {'>1323132': True, '>1324132': True}, 13: {'>2412132': True}, 14: {'>1212132': True, '>1213242': True, '>1323142': True}, 15: {'>1323242': True, '>2413242': True, '>2435132': True}, 16: {'>1213232': True} }, 'E': { 24: {'>14523152': True}, 28: {'>14523142': True}, 31: {'>14512152': True}, 32: {'>14513152': True, '>14515342': True, '>14515352': True, '>14524152': True}, 33: {'>13412152': True, '>13523152': True, '>14515452': True} }, 'F': { 17: {'>124251': True}, 19: {'>124151': True}, 20: {'>123151': True, '>124141': True}, 22: {'>123141': True, '>124131': True}, 23: {'>135141': True, '>135251': True}, 24: {'>125251': True, '>135131': True} }, 'G': { 2: {'>3432321': True}, 3: {'>2321321': True}, 5: {'>3431321': True}, 6: {'>2432321': True, '>4542321': True}, 7: {'>2132321': True}, 8: {'>2321421': True, '>2321431': True, '>3132321': True, '>3432421': True} } } class JacobsTest(unittest.TestCase): def test_distance(self): jacobs = Jacobs(segmenter=ManualDSegmenter(), segment_combiner="cost") c4e4_dist = jacobs.distance(from_midi=60, to_midi=64) e4g4_dist = jacobs.distance(from_midi=64, to_midi=67) self.assertEqual(c4e4_dist, e4g4_dist, "Bad distance") e4c4_dist = jacobs.distance(from_midi=64, to_midi=60) g4e4_dist = jacobs.distance(from_midi=67, to_midi=64) self.assertEqual(e4c4_dist, g4e4_dist, "Bad negative distance") def test_four_note_example(self): jacobs = Jacobs(segmenter=ManualDSegmenter(), segment_combiner="cost") d_corpus = DCorpus(corpus_str=TestConstant.FOUR_NOTES) jacobs.load_corpus(d_corpus=d_corpus) suggestions, costs, details = jacobs.generate_advice(staff="upper", k=2) self.assertEqual(len(suggestions), 2, "No loops in that dog in top ten") # jacobs.report_on_advice(suggestions, costs, details) @staticmethod def test_cycles(): jake = Jacobs() jake.segment_combiner(method="cost") d_corpus = DCorpus(corpus_str=TestConstant.FOUR_NOTES) jake.load_corpus(d_corpus=d_corpus) suggestions, costs, details = jake.generate_advice(staff="upper", cycle=4, k=2) assert len(suggestions) == 2, "No loops in that dog in top ten" # jake.report_on_advice(suggestions, costs, details) d_corpus = DCorpus(corpus_str=TestConstant.PARNCUTT_HUMAN_FRAGMENT['B']) jake.load_corpus(d_corpus=d_corpus) suggestions, costs, details = jake.generate_advice(staff="upper", cycle=4, k=16) assert len(suggestions) == 16, "There should be 16 cyclic fingerings!" 
# jake.report_on_advice(suggestions, costs, details) @staticmethod def test_fingering_counts(): jake = Jacobs(pruning_method="none") jake.segment_combiner(method="cost") d_corpus = DCorpus(corpus_str=TestConstant.FOUR_NOTES) jake.load_corpus(d_corpus=d_corpus) suggestions, costs, details = jake.generate_advice(staff="upper", k=2) assert jake.last_segment_pruned_count() == 320, "Bad none pruning on open-ended problem" # suggestions, costs, details = jake.generate_advice(staff="upper", last_digit=5, k=10) # print(suggestions) # print(jake.last_segment_pruned_count()) @staticmethod def test_bl1(): jake = Jacobs() jake.segment_combiner(method="cost") midi_1 = None handed_digit_1 = '-' midi_2 = 61 handed_digit_2 = '>1' midi_3 = 65 handed_digit_3 = '>3' trigram_node = TrigramNode(midi_1, handed_digit_1, midi_2, handed_digit_2, midi_3, handed_digit_3) cost, costs = jake.trigram_node_cost(trigram_node=trigram_node) assert costs['bl1'] == 3, "Bad bl1 cost" @staticmethod def test_good_rules(): jake = Jacobs() jake.segment_combiner(method="cost") for id in subcosts: d_corpus = DCorpus(corpus_str=TestConstant.PARNCUTT_HUMAN_FRAGMENT[id]) jake.load_corpus(d_corpus=d_corpus) if id == 'B': suggestions, costs, details = jake.generate_advice(staff="upper", cycle=4, k=20) else: suggestions, costs, details = jake.generate_advice(staff="upper", last_digit=last_digit[id], k=30) details_for_sugg = dict() for i in range(len(details)): details_for_sugg[suggestions[i]] = details[i][0] # 0 index because we only have one segment jake.report_on_advice(suggestions, costs, details) for gold_sugg in subcosts[id]: assert gold_sugg in details_for_sugg, \ "Missing suggestion {0} in {1}".format(gold_sugg, id) for rule in subcosts[id][gold_sugg]: if rule == '345': continue gold_cost = subcosts[id][gold_sugg][rule] cost = details_for_sugg[gold_sugg][rule] assert cost == gold_cost, \ "Bad {0} cost for {1} in {2}: {3} should be {4}".format(rule, gold_sugg, id, cost, gold_cost) # # @staticmethod # # def test_corrected_jake(): # # jake = Jacobs(pruning_method="none") # # jake.segment_combiner(method="cost") # # # # for id in subcosts: # # if id != 'B': # # continue # # d_corpus = DCorpus(corpus_str=TestConstant.PARNCUTT_HUMAN_FRAGMENT[id]) # # jake.load_corpus(d_corpus=d_corpus) # # if id == 'B': # # suggestions, costs, details = jake.generate_advice(staff="upper", cycle=4, k=500) # # else: # # suggestions, costs, details = jake.generate_advice(staff="upper", last_digit=last_digit[id], k=500) # # playable_count = jake.last_segment_pruned_count() # # jake.report_on_advice(suggestions, costs, details) # # # Output for LaTeX report. # # for i in range(len(details)): # # sugg = suggestions[i] # # cost = costs[i] # # subcosts_for_sugg = details[i][0] # 0 index because we only have one segment # # record_str = "{0}&{1}&".format(i+1, sugg[1:]) # # for subcost_name in subcost_names: # # record_str += str(subcosts_for_sugg[subcost_name]) + '&' # # record_str += str(int(cost)) + "\\\\" # # print(record_str) # # print("Piece {0}, playable fingerings: {1}".format(id, playable_count)) # # # The solutions from original Parncutt paper are inconsistent for painfully well-documented reasons. # # Leave the following commented out. # # for id in solutions: # # d_corpus = DCorpus(corpus_str=TestConstant.PARNCUTT_HUMAN_FRAGMENT[id])
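As a reading aid, the gold sub-cost tables above can be tabulated per rule with a few lines; this helper is not part of the test suite and simply walks the subcosts and subcost_names structures defined in this module.

def print_gold_costs(subcost_table, names, fragment_id):
    # One row per gold fingering suggestion, one column per rule; rules
    # without a recorded value (e.g. 'pcc', 'pcs') are shown as '-'.
    print("fingering  " + " ".join(names))
    for sugg, rule_costs in sorted(subcost_table[fragment_id].items()):
        row = [str(rule_costs.get(name, "-")) for name in names]
        print("%s  %s" % (sugg, " ".join(row)))

# Example: print_gold_costs(subcosts, subcost_names, 'G')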
<reponame>JaiWillems/SatPy """Localize position information representations. This module provides the `Coordinate` class to allow for a simple interface for converting a position type into various representations. The class is also used for position inputs and outputs for other `Celest` functionality. """ from celest.core.decorators import set_module from celest.satellite._angle_representations import _ISO6709_representation from typing import Any, Literal, Tuple import numpy as np @set_module('celest.satellite') class Coordinate(object): """Localize position information representations. The `Coordinate` class provides a simple user interface for converting a position type into various representations. The class is also used for position inputs and outputs for other `Celest` functionality. Parameters ---------- position : np.ndarray Base position to initialize the `Coodinate` class. frame : {"gcrs", "geo", "itrs"} Specifies the input position frame. time : Time Times associated with the position data in the J2000 epoch. The length of the `time` parameter must match the length of the `position` parameter. Attributes ---------- time : Time Times corresponding to the position data. length : int Length of the input position and time arrays. Methods ------- geo(iso=False) Return geographical position data. era() Return the Earth rotation angles in radians and decimals. gcrs() Return cartesian gcrs position data. itrs() Return cartesian itrs position data. horizontal(location) Return horizontal position data in degrees and decimals. off_nadir(location) Return the off-nadir angle to a ground location. altitude() Return the altitude above Earth's surface in kilometres. distance(location) Return the distance to a ground location. """ def __init__(self, position: np.ndarray, frame: Literal["gcrs", "geo", "itrs"], time: Any) -> None: """Initialize attributes.""" if frame not in ["gcrs", "geo", "itrs"]: raise ValueError(f"{frame} is not a valid frame.") if position.shape[0] != len(time): raise ValueError(f"position and time data lengths are mismatched being {position.shape[0]} and {len(time)}") self.time = time self._GEO = None self._GCRS = None self._ITRS = None self.length = None self._set_base_position(position, frame) def __len__(self) -> int: """Return length of position and time data.""" return self.length def _set_base_position(self, position: np.ndarray, frame: Literal["gcrs", "geo", "itrs"]) -> None: """Initialize base position. This method takes an input position to initialize the object's base position. Parameters ---------- position : np.ndarray Array of shape (n, 2) or (n, 3) containing the inputted position data. frame : {"gcrs", "geo", "itrs"} String defining the type of input position data as the Geocentric Celestial Reference System (gcrs), International Terrestrial Reference System (itrs), or geographical (geo) data. Notes ----- The input data must be of shape (n, 3) if `type="itrs"` or `type="gcrs"` where the columns are XYZ cartesian data. The data can be of the shape (n, 2) or (n, 3) if `type="geo"` where the columns are geodetic latitude, terrestrial longitude, and geodetic altitude. When geographical data is entered of shape (n, 2), the height data is assumed to be zero. 
""" basePos = position self.length = basePos.shape[0] if frame == "geo": if basePos.shape[1] == 2: height = np.zeros((self.length, 1)) basePos = np.concatenate((basePos, height), axis=1) self._GEO = basePos elif frame == "gcrs": self._GCRS = basePos elif frame == "itrs": self._ITRS = basePos def _geo_to_itrs(self, position: np.ndarray) -> np.ndarray: """Convert geographical to itrs coordinates. Parameters ---------- position : np.ndarray Array of shape (n, 3) containing geographical coordinates of a position with columns of geodetic latitude, terrestrial longitude, and geodetic altitude given in decimal degrees and kilometres. Returns ------- np.ndarray Array of shape (n, 3) containing the XYZ itrs position data. See Also -------- _itrs_to_geo : Convert itrs to geographical coordinates. Notes ----- This method uses an ellipsoid based model of the Earth to convert a geographical position to itrs cartesian coordinates using the methods described in "Coordinate Systems in Geodesy" by <NAME> and <NAME> as presented by <NAME>. [KW98a]_ [Lum20]_ References ---------- .. [KW98a] <NAME> and <NAME>. Coordinate Systems in Geodesy. Jan. 1998. .. [Lum20] <NAME>. Geodetic Coordinates: Computing Latitude and Longitude.June 2020.url:https://www.youtube.com/watch?v=4BJ-GpYbZlU. """ a = 6378.137 b = 6356.752314245 lat, lon = np.radians(position[:, 0]), np.radians(position[:, 1]) e = np.sqrt(1 - b ** 2 / a ** 2) N = a / np.sqrt(1 - e ** 2 * np.sin(lat) ** 2) h = position[:, 2] x = ((N + h) * np.cos(lat) * np.cos(lon)).reshape((-1, 1)) y = ((N + h) * np.cos(lat) * np.sin(lon)).reshape((-1, 1)) z = ((N * (1 - e ** 2) + h) * np.sin(lat)).reshape((-1, 1)) itrs = np.concatenate((x, y, z), axis=1) return itrs def _itrs_to_geo(self, position: np.ndarray) -> np.ndarray: """Convert itrs to geographical coordinates. Parameters ---------- position : np.ndarray Array of shape (n, 3) with rows containing XYZ itrs position data. Returns ------- np.ndarray Array of shape (n, 3) with columns of geodetic latitude, terrestrial longitude, and geodetic altitude data in degrees and kilometres. See Also -------- _GEO_to_itrs : Convert geographical to itrs coordinates. Notes ----- Let :math:`x`, :math:`y`, and :math:`z` be the itrs vector components. We can then calculate the latitude, :math:`\phi`, using the following equation: .. math:: \phi = 90^\circ - \cos^{-1}\left(\frac{z}{R_\bigoplus}\right) where :math:`R_\bigoplus` is the geocentric radius of the Earth. We can also calculate the longitude, :math:`\lambda` using the following: .. math::\lambda = \tan^{-1}_2\left(y, x\right) """ # Cartesian coordinates. x = position[:, 0] y = position[:, 1] z = position[:, 2] # Convert to spherical coordinates. radius = np.sqrt(x ** 2 + y ** 2 + z ** 2) theta = np.degrees(np.arccos(z / radius)) phi = np.degrees(np.arctan2(y, x)) # Get Geodetic altitude. alt = self.altitude() # Fomulate output array. lat = (90 - theta).reshape(-1, 1) lon = phi.reshape(-1, 1) alt = alt.reshape(-1, 1) geo = np.concatenate((lat, lon, alt), axis=1) return geo def geo(self, iso: bool=False) -> np.ndarray: """Return geographical position data. Parameters ---------- iso : bool, optional Formats the output as ISO6709 sexagesimal position strings if true. Returns ------- np.ndarray Array of shape (n, 3) with columns of geodetic latitude, terrestrial longitude, and geodetic altitude data. If `iso=True`, the output is an array of shape (n,) containing standard representation position strings. See Also -------- itrs : Return itrs position data. 
gcrs : Return gcrs position data. Examples -------- >>> time = Time(julian=np.array([2454545])) >>> position = np.array([[6343.82, -2640.87, -11.26]]) >>> coor = Coordinate(position=position, frame="itrs", time=time) >>> coor.geo() np.array([[-9.38870528e-02, -2.26014826e+01, 5.04126976e+02]]) We can generate user-friendly position strings by setting `iso=True`. >>> coor.geo(iso=True) np.array(['00°05′37.99″S 22°36′05.34″W 504.12697']) """ if self._GEO is None: if self._ITRS is not None: self._GEO = self._itrs_to_geo(position=self._ITRS) else: self._ITRS = self._gcrs_and_itrs(position=self._GCRS, frame="gcrs") self._GEO = self._itrs_to_geo(position=self._ITRS) geo = _ISO6709_representation(position=self._GEO) if iso else self._GEO return geo def era(self) -> np.ndarray: """Return the Earth rotation angles in degrees and decimals. Returns ------- np.ndarray Array of shape (n,) containing Earth rotation angles in degrees and decimals. Notes ----- The Earth rotation angle denotes the diurnal rotation component of the coordinate misalignment between the ECI and ECEF frames. The angle can be calculated from the following formulation: .. math:: \gamma^\circ = 360.9856123035484\Delta T + 280.46 where :math:`\Delta T=JD-2451545` is the elapsed days since the J2000 epoch where :math:`JD` is the Julian day. [Kok17a]_ References ---------- .. [Kok17a] <NAME>. Changing Coordinates in the Context of Orbital Mechanics. Cyber and Electronic Warfare Division, Defence Science, and Technology Group, Jan.2017, p. 12 - 13. Examples -------- >>> time = Time(julian=np.array([2454545])) >>> position = np.array([[6343.82, -2640.87, -11.26]]) >>> coor = Coordinate(position=position, type="itrs", time=time) >>> coor.era() np.array([6.2360075]) """ ang = np.zeros((self.length,)) jul_data = self.time.julian() # Multiply time elapsed since J2000 by Earth rotation rate and add # J2000 orientation. dJulian = jul_data - 2451545 ang = (360.9856123035484 * dJulian + 280.46) % 360 return ang def _gcrs_and_itrs(self, position: np.ndarray, frame: Literal["itrs", "gcrs"]) -> np.ndarray: """Convert between gcrs and itrs positions. Parameters ---------- position : np.ndarray Array of shape (n, 3) representing the input data as XYZ cartesian data. frame : {"itrs", "gcrs"} The type of input data. Returns ------- np.ndarray Array of shape (n, 3) representing the output data as XYZ cartesian data. Notes ----- This method applies a simplification to converting between gcrs and itrs coordinate by disregarding precession, nutation, and polar motion effects (which will be incorporated in future releases).
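The geodetic-to-ITRS arithmetic in _geo_to_itrs() is easy to check on a single point. The sketch below repeats the same formulas outside the class for one hypothetical ground point at 45° N, 73° W and sea level, using the same semi-axes as above; it is a verification aid, not part of the Coordinate API.

import numpy as np

def geo_to_itrs_point(lat_deg, lon_deg, h_km):
    a, b = 6378.137, 6356.752314245                  # ellipsoid semi-axes, km
    lat, lon = np.radians(lat_deg), np.radians(lon_deg)
    e = np.sqrt(1 - b ** 2 / a ** 2)                  # first eccentricity
    N = a / np.sqrt(1 - e ** 2 * np.sin(lat) ** 2)    # prime vertical radius
    x = (N + h_km) * np.cos(lat) * np.cos(lon)
    y = (N + h_km) * np.cos(lat) * np.sin(lon)
    z = (N * (1 - e ** 2) + h_km) * np.sin(lat)
    return np.array([x, y, z])

# Roughly [1320.8, -4320.2, 4487.4] km for this point.
print(geo_to_itrs_point(45.0, -73.0, 0.0))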
not self._vector: self._vector = Vector(((w, self.tfidf(w)) for w in self.terms)) return self._vector def keywords(self, top=10, normalized=True): """ Returns a sorted list of (relevancy, word)-tuples that are top keywords in the document. With normalized=True, weights are normalized between 0.0 and 1.0 (their sum will be 1.0). """ n = normalized and sum(self.vector.itervalues()) or 1.0 v = ((f/n, w) for w, f in self.vector.iteritems()) v = heapq.nsmallest(top, v, key=lambda v: (-v[0],v[1])) return ftlist(v) def similarity(self, document): """ Returns the similarity between the two documents as a number between 0.0-1.0. If both documents are in the same corpus the calculations are cached for reuse. """ if self._corpus: return self._corpus.similarity(self, document) elif document._corpus: return document._corpus.similarity(self, document) else: f = Corpus((self, document)).similarity(self, document) # Unlink both documents from the ad-hoc corpus: self._corpus = document._corpus = None return f def copy(self): d = Document(name=self.name); d.terms.update(self); return d def __eq__(self, document): return isinstance(document, Document) and self._id == document._id def __ne__(self, document): return not self.__eq__(document) def __repr__(self): return "Document(id=%s, %scount=%s)" % ( self._id, self.name and "name=%s, " % repr(self.name) or "", self.count) #--- VECTOR ------------------------------------------------------------------------------------------ class WeightError(Exception): pass class Vector(dict): def __init__(self, *args, **kwargs): """ Vector is a dictionary of (word, weight)-items based on the terms in a Document. """ self.weight = kwargs.pop("weight", TFIDF) # Vector weights based on tf or tf-idf? self._norm = None dict.__init__(self, *args, **kwargs) @property def frobenius_norm(self): """ Yields the Frobenius matrix norm. n = the square root of the sum of the absolute squares of the values. The matrix norm is used when calculating cosine similarity between documents. """ if not self._norm: self._norm = sum(x**2 for x in self.itervalues())**0.5 return self._norm norm = l2_norm = frobenius_norm def copy(self): return Vector(self) def __call__(self, vector={}): if isinstance(vector, (Document, Corpus)): vector = vector.vector if isinstance(vector, Vector) and self.weight != vector.weight: raise WeightError, "mixing %s vector with %s vector" % (self.weight, vector.weight) # Return a copy of the vector, updated with values from the other vector. # Only keys that appear in this vector will be updated (i.e. no new keys are added). V = self.copy(); V.update((k,v) for k,v in vector.iteritems() if k in V); return V #--- CORPUS ------------------------------------------------------------------------------------------ NORM, TOP300 = "norm", "top300" class Corpus(object): def __init__(self, documents=[]): """ A corpus is a collection of documents, where each document is a bag of (word, count)-items. Documents can be compared for similarity. """ self.documents = readonlydict() # Document.id => Document self._index = {} # Document.name => Document self._similarity = {} # Cache of ((D1.id,D2.id), weight)-items (cosine similarity). self._vector = None # Cache of corpus vector with all the words in the corpus. self._update() self.extend(documents) @classmethod def build(Corpus, path, *args, **kwargs): """ Builds the corpus from a folder of text documents (e.g. path="folder/*.txt"). Each file is split into words and the words are counted. 
""" documents = [] for f in glob.glob(path): kwargs["name"] = name=filename(f) documents.append(Document.open(f, *args, **kwargs)) return Corpus(documents) @classmethod def load(Corpus, path): """ Loads the corpus from a pickle file created with Corpus.save(). """ return cPickle.load(open(path)) def save(self, path, update=False): """ Saves the corpus as a pickle file at the given path. It can be loaded with Corpus.load(). This is faster because the words in the documents do not need to be stemmed again, and cached vectors and similarities are stored """ if update: for d1 in self.documents.values(): for d2 in self.documents.values(): self.cosine_similarity(d1, d2) # Update the entire cache before saving. cPickle.dump(self, open(path, "w")) def _update(self): # Ensures that all document relevancy vectors are recalculated # when a document is added or deleted in the corpus (= new words or less words). self._vector = None self._similarity = {} for document in self.documents.values(): document._vector = None def __len__(self): return len(self.documents) def __iter__(self): return iter(self.documents.values()) def __getitem__(self, id): return self.documents[id] def __delitem__(self, id): if not id in self.documents: return d = dict.pop(self.documents, id) d.corpus = None self._index.pop(d.name, None) self._update() def clear(self): dict.clear(self.documents) self._update() def append(self, document): """ Appends the given Document to the corpus, setting the corpus as its parent. The corpus is updated, meaning that the cache of vectors and similarities is cleared (relevancy and similarity weights will be different now that there is a new document). """ document.corpus = self if document.name is not None: self._index[document.name] = document dict.__setitem__(self.documents, document.id, document) self._update() def extend(self, documents): for document in documents: document.corpus = self if document.name is not None: self._index[document.name] = document dict.__setitem__(self.documents, document.id, document) self._update() def remove(self, document): self.__delitem__(document.id) def document(self, name): # This assumes document names are unique. if name in self._index: return self._index[name] if isinstance(name, int): return self.documents.get(name) def document_frequency(self, word): """ Returns the document frequency of a word. Returns 0 if there are no documents in the corpus (e.g. no word frequency). df = number of documents containing the word / number of documents. The more occurences of the word across the corpus, the higher its df weight. """ if len(self.documents) == 0: return 0 return len([True for d in self.documents.values() if word in d]) / float(len(self.documents)) df = document_frequency def inverse_document_frequency(self, word): """ Returns the inverse document frequency of a word. Returns None if the word is not in the corpus, or if there are no documents in the corpus. idf = log(1/df) The more occurences of the word, the lower its idf weight (log() makes it grow slowly). """ df = self.df(word) return df != 0 and log(1.0/df) or None idf = inverse_document_frequency @property def vector(self): """ Returns a dictionary of (word, 0)-items from the corpus. It includes all words from all documents (i.e. it is the dimension of the vector space). If a document is given, sets the document word relevancy values in the vector. 
""" if not self._vector: self._vector = Vector(); [[self._vector.setdefault(word, 0) for word in d] for d in self.documents.values()] return self._vector # Note: # - Corpus.vector is the dictionary of (word, 0)-items. # - Corpus.vector(document) returns a copy with the document's word relevancy values in it. # Words in a document that are not in the corpus vector are ignored # (e.g. the document was not in the corpus, this can be the case in Corpus.search() for example). # See Vector.__call__() why this is possible. def cosine_similarity(self, document1, document2): """ Returns the similarity between two documents in the corpus as a number between 0.0-1.0. The weight is based on the document relevancy vectors (i.e. tf-idf of words in the text). cos = dot(v1,v2) / (norm(v1) * norm(v2)) """ # If we already calculated the similarity between the given documents, # it is available in cache for reuse. id1 = document1.id id2 = document2.id if (id1,id2) in self._similarity: return self._similarity[(id1,id2)] if (id2,id1) in self._similarity: return self._similarity[(id2,id1)] # Calculate the matrix multiplication of the document vectors. v1 = self.vector(document1) v2 = self.vector(document2) dot = sum(a*b for a,b in izip(v1.itervalues(), v2.itervalues())) # It makes no difference if we use v1.norm or document1.vector.norm, # so we opt for the second choice because it is cached. s = float(dot) / (document1.vector.norm * document2.vector.norm) # Cache the similarity weight for reuse. self._similarity[(id1,id2)] = s return s similarity = cosine_similarity def related(self, document, top=10): """ Returns a list of (weight, document)-tuples in the corpus, sorted by similarity to the given document. """ v = ((self.similarity(document, d), d) for d in self.documents.itervalues()) # Filter the input document from the matches. # Filter documents that scored 0.0 and return the top. v = [(w, d) for w, d in v if w > 0 and d.id != document.id] v = heapq.nsmallest(top, v, key=lambda v: (-v[0],v[1])) return ftlist(v) def vector_space_search(self, words=[], **kwargs): """ Returns related documents from the corpus, as a list of (weight, document)-tuples. The given words can be a string
#-*- coding: utf-8 -*- import sys import random import numpy as np import pandas as pd import utility_1 import h5py import json eps=1e-12 def countCG(strs): strs = strs.upper() return float((strs.count("C")+strs.count("G")))/(len(strs)) def countCG_N(strs): strs = strs.upper() return float((strs.count("C")+strs.count("G")))/(len(strs)-strs.count("N")+eps) def countCG_skew(strs): strs = strs.upper() num1, num2 = strs.count("G"), strs.count("C") return float((num1-num2))/(num1+num2+eps) def one_hot_encoding(seq, seq_len): vec1 = np.zeros((4,seq_len)) cnt = 0 for i in range(0,seq_len): print(i) if seq[i]=='A': vec1[0,i] = 1 elif seq[i]=='G': vec1[1,i] = 1 elif seq[i]=='C': vec1[2,i] = 1 elif seq[i]=='T': vec1[3,i] = 1 else: pass return np.int64(vec1) def index_encoding(seq, seq_len, seq_dict): vec1 = np.zeros(seq_len) for i in range(0,seq_len): vec1[i] = seq_dict[seq[i]] return np.int64(vec1) # Read sequences as strings ("N" retained) def getString(fileStr): file = open(fileStr, 'r') gen_seq = "" lines = file.readlines() for line in lines: line = line.strip() gen_seq += line gen_seq = gen_seq.upper() return gen_seq # Read sequences of format fasta ("N" removed) def getStringforUnlabel(fileStr): file = open(fileStr, 'r') gen_seq = "" lines = file.readlines() for line in lines: if(line[0] == ">"): continue else: line = line.strip() gen_seq += line gen_seq = gen_seq.upper() gen_seq = gen_seq.replace("N", "") return gen_seq def get_reverse_str(str): str = str.upper() str_new="" for i in range(len(str)): if(str[i]=="T"): str_new+="A" elif(str[i]=="A"): str_new+="T" elif(str[i]=="G"): str_new+="C" elif(str[i]=="C"): str_new+="G" else: str_new+=str[i] return str_new # Get sequence of 2K+1 centered at pos def getSubSeq(str, pos, K): n = len(str) l = pos - K r = pos + K + 1 if l > r or l < 0 or r > n - 1: return 0 elif "N" in str[l:r]: return 0 return str[l:r] # Get sequence of 2K+1 centered at pos def getSubSeq2(str, pos, K): n = len(str) l = max(0, pos - K) r = min(n - 1, pos + K + 1) if l > r: print(l, pos, r) print("left pointer is bigger than right one") return 0 return str[l:pos]+" "+str[pos]+" "+str[pos+1:r] # Convert DNA to sentences with overlapping window of size K def DNA2Sentence(dna, K): sentence = "" length = len(dna) for i in range(length - K + 1): sentence += dna[i: i + K] + " " # remove spaces sentence = sentence[0 : len(sentence) - 1] return sentence # Convert DNA to sentences with overlapping window of size K in reverse direction def DNA2SentenceReverse(dna, K): sentence = "" length = len(dna) for i in range(length - K + 1): j = length - K - i sentence += dna[j: j + K] + " " # remove spaces sentence = sentence[0 : len(sentence) - 1] return sentence def reverse(s): str = "" for i in s: str = i + str return str # Convert DNA to sentences with overlapping window of size K in reverse direction def DNA2SentenceReverse_1(dna, K): sentence = "" length = len(dna) dna = reverse(dna) for i in range(length - K + 1): sentence += dna[i: i + K] + " " # remove spaces sentence = sentence[0 : len(sentence) - 1] return sentence # Convert DNA to sentences with non-overlapping window of size K def DNA2SentenceJump(dna, K,step): sentence = "" length = len(dna) i=0 while i <= length - K: sentence += dna[i: i + K] + " " i += step return sentence # Convert DNA to sentences with non-overlapping window of size K in reverse direction def DNA2SentenceJumpReverse(dna, K,step): sentence = "" length = len(dna) i=0 while j <= length - K: i = length - K - j sentence += dna[i: i + K] + " " j += step return sentence 
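# As written, DNA2SentenceJumpReverse above loops on `while j <= length - K` but
# never initializes `j` (only `i = 0` is set), so it raises a NameError on first
# use. A corrected sketch of what the function appears to intend, under a renamed
# helper so it is not mistaken for the original:
def dna_to_sentence_jump_reverse(dna, K, step):
    """Non-overlapping K-mers collected from the 3' end of `dna`, stepping by `step`."""
    words = []
    length = len(dna)
    j = 0
    while j <= length - K:
        i = length - K - j            # start index counted from the right end
        words.append(dna[i: i + K])
        j += step
    return " ".join(words)

print(dna_to_sentence_jump_reverse("ACGTACGT", 3, 3))  # "CGT GTA"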
def gen_Seq(Range): print ("Generating Seq...") table = pd.read_table(PATH1+"prep_data.txt",sep = "\t") print (len(table)) table.drop_duplicates() print (len(table)) label_file = open(PATH1+"LabelSeq", "w") total = len(table) list = ["chr1", "chr2", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8", "chr9", \ "chr10", "chr11", "chr12", "chr13", "chr14", "chr15", "chr16", "chr17", \ "chr18", "chr19", "chr20", "chr21", "chr22", "chrX", "chrY","chrM"] number_positive = 0 dict_pos={} for i in range(total): if (number_positive % 100 == 0) and (number_positive != 0): print ("number of seq: %d of %d\r" %(number_positive,total),end = "") sys.stdout.flush() chromosome = table["chromosome"][i] if chromosome in dict_pos.keys(): strs = dict_pos[chromosome] else: strs = processSeq.getString(ROOT_PATH1+"Chromosome_38/" + str(chromosome) + ".fa") dict_pos[chromosome] = strs bias = 7 start = int(table["start"][i] - 1 - Range + bias) end = start + 23 + Range*2 strand = table["strand"][i] edstrs1 = strs[start : end] if strand == "-": edstrs1 = edstrs1[::-1] edstrs1 = processSeq.get_reverse_str(edstrs1) if "N" in edstrs1: table = table.drop(i) continue outstr = "%s\n"%(edstrs1) label_file.write(outstr) number_positive += 1 table.to_csv(PATH1+"prep_data.txt",sep = "\t",index = False) def get_target(): table = pd.read_table(PATH1+"prep_data.txt", sep="\t") print (len(table)) table.drop_duplicates() print (len(table)) target_file = open(PATH1+"TargetSeq", "w") for i in range(len(table)): target = table['target'][i].upper() target_file.write(target+"\n") target_file.close() def prep_data(): chrom_list = ["chr1", "chr2", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8", "chr9", \ "chr10", "chr11", "chr12", "chr13", "chr14", "chr15", "chr16", "chr17", \ "chr18", "chr19", "chr20", "chr21", "chr22", "chrX", "chrY","chrM"] tab = pd.read_table(PATH1+"casoffinder_CHANGEseq_joined.tsv",sep = '\t') tab = tab[tab['chromosome'].isin(chrom_list)] tab['label'] = 1 - tab['reads'].isna() tab['end'] = tab['start'] + 23 print (tab['chromosome'].unique()) tab.to_csv(PATH1+"prep_data.txt",sep = "\t",index = False) def load_file(f_name,length,vec_name): base_code = { 'A': 0, 'C': 1, 'G': 2, 'T': 3, } num_pairs = sum(1 for line in open(f_name)) # number of sample pairs num_bases = 4 with open(f_name, 'r') as f: line_num = 0 # number of lines (i.e., samples) read so far for line in f.read().splitlines(): if (line_num % 100 == 0) and (line_num != 0): print ("number of input data: %d\r" %(line_num),end= "") sys.stdout.flush() if line_num == 0: # allocate space for output seg_length = length # number of bases per sample Xs_seq1 = np.zeros((num_pairs, num_bases, seg_length)) for start in range(len(line)): if line[start] in base_code: print (start) break base_num = 0 for x in line[start:start+length]: if x != "N": Xs_seq1[line_num, base_code[x], base_num] = 1 base_num += 1 line_num += 1 X = Xs_seq1 np.save("../%s" %(vec_name),X) def kmer_dict(K): vec1 = ['A','G','C','T'] vec2 = vec1.copy() # kmer dict vec3 = [] num1 = len(vec1) for k1 in range(1,K): for character in vec1: for temp1 in vec2: seq1 = character+temp1 vec3.append(seq1) vec2 = vec3.copy() vec3 = [] return vec2 def kmer_counting(seq, K, kmer_dict1): len1 = len(kmer_dict1) vec = np.zeros((len1),dtype=np.float32) len2 = len(seq)-K+1 cnt = 0 for kmer in kmer_dict1: num1 = seq.count(kmer) vec[cnt] = num1 cnt = cnt+1 vec = vec*1.0/len2 return vec def align_region(species_id): col1, col2, col3 = '%s.chrom'%(species_id), '%s.start'%(species_id), '%s.stop'%(species_id) def 
load_seq_kmer(species_id, file1, filename2, K, kmer_dict1): # file1 = pd.read_csv(filename1,sep='\t') col1, col2, col3 = '%s.chrom'%(species_id), '%s.start'%(species_id), '%s.stop'%(species_id) chrom, start, stop, serial = np.asarray(file1[col1]), np.asarray(file1[col2]), np.asarray(file1[col3]), np.asarray(file1['serial']) num1 = len(chrom) file = open(filename2, 'r') # serial_list, line_list = [], [] serial_list = -np.ones((num1,2)) f_list = np.zeros((num1,feature_dim)) lines = file.readlines() num_line = len(lines) cnt = -1 flag = 0 print(num_line,num1) # temp1 = int(num_line/2) for line in lines: if(line[0]==">"): # continue # line: >chr1:5-10 cnt = cnt + 1 str1 = line[1:] temp1 = str1.split(':') t_chrom = temp1[0] temp2 = temp1[1].split('-') t_start, t_stop = int(temp2[0]), int(temp2[1]) chrom1, start1, stop1, serial1 = chrom[cnt], start[cnt], stop[cnt], serial[cnt] if (chrom1==t_chrom) and (start1==t_start) and (stop1==t_stop): flag = 1 else: b = np.where((chrom==t_chrom)&(start==t_start)&(stop==t_stop))[0] if len(b)>0: cnt = b[0] flag = 1 else: if flag == 1: line = line.strip().upper() vec = kmer_counting(line,K,kmer_dict1) # line_list.append(line) # f_list.append(vec) # line_list.append(line) # N_list.append(line.count('N')) flag = 0 serial_list[cnt,0], serial_list[cnt,1] = serial[cnt], line.count('N') f_list[cnt] = vec filename1 = '%s.vec'%(species_id) np.save(filename1,(serial_list,f_list)) return serial_list, f_list # load the annotation file and the sequence feature file # return kmer feature: num_samples*feature_dim # return one-hot encoding feature: num_samples*4*feature_dim def load_seq_1_ori(species_id, file1, filename2, K, kmer_dict1): # file1 = pd.read_csv(filename1,sep='\t') col1, col2, col3 = '%s.chrom'%(species_id), '%s.start'%(species_id), '%s.stop'%(species_id) chrom, start, stop, serial = np.asarray(file1[col1]), np.asarray(file1[col2]), np.asarray(file1[col3]), np.asarray(file1['serial']) label = np.asarray(file1['label']) group_label = np.asarray(file1['group_label']) signal = np.asarray(file1['signal']) num1 = len(chrom) len1 = stop-start seq_len = int(np.median(len1)) file = open(filename2, 'r') # serial_list, line_list = [], [] serial_list = -np.ones((num1,2)) feature_dim = len(kmer_dict1) f_list = np.zeros((num1,feature_dim)) f_mtx = np.zeros((num1,4,seq_len)) lines = file.readlines() num_line = len(lines) cnt = -1 flag = 0 print(num_line,num1) # temp1 = int(num_line/2) i = 0 for line in lines: if(line[0]==">"): # continue # line: >chr1:5-10 print(cnt) cnt = cnt + 1 str1 = line[1:] temp1 = str1.split(':') t_chrom = temp1[0] temp2 = temp1[1].split('-') t_start, t_stop = int(temp2[0]), int(temp2[1]) chrom1, start1, stop1, serial1 = chrom[cnt], start[cnt], stop[cnt], serial[cnt] if (chrom1==t_chrom) and (start1==t_start) and (stop1==t_stop): flag = 1 else: b = np.where((chrom==t_chrom)&(start==t_start)&(stop==t_stop))[0] if len(b)>0: cnt = b[0] flag = 1 else: if flag == 1: line = line.strip().upper() vec = kmer_counting(line,K,kmer_dict1) # line_list.append(line) # f_list.append(vec) flag = 0 serial_list[cnt,0], serial_list[cnt,1] = serial[cnt], line.count('N') f_list[cnt] = vec f_mtx[cnt] = one_hot_encoding(line, seq_len) i += 1 if i % 100 == 0: print("%d of %d\r" %(i,num1), end = "") sys.stdout.flush() b = np.where(serial_list[:,0]>=0)[0] serial_list, f_list, f_mtx, label, group_label = serial_list[b], f_list[b], f_mtx[b], label[b], group_label[b] # filename1 = '%s.vec'%(species_id) # np.save(filename1,(serial_list,f_list)) return serial_list, f_list, f_mtx, 
label, group_label, signal # load feature def load_seq_altfeature_1(species_id, file1, filename2, output_filename): # file1 = pd.read_csv(filename1,sep='\t') col1, col2, col3 = '%s.chrom'%(species_id), '%s.start'%(species_id), '%s.stop'%(species_id) chrom, start, stop, serial = np.asarray(file1[col1]), np.asarray(file1[col2]), np.asarray(file1[col3]), np.asarray(file1['serial']) label = np.asarray(file1['label']) group_label = np.asarray(file1['group_label']) signal = np.asarray(file1['signal']) num1 = len(chrom) len1 = stop-start seq_len = int(np.median(len1)) file = open(filename2, 'r') # serial_list, line_list = [], [] serial_list = -np.ones((num1,3)) feature_dim = 3 # num1 = 2000 f_list = np.zeros((num1,feature_dim)) # f_mtx = np.zeros((num1,4,seq_len)) lines = file.readlines() num_line = len(lines) cnt = -1 flag = 0 print(num_line,num1) # temp1 = int(num_line/2) i = 0 serial_vec, seq_vec = [], [] for line in lines: if(line[0]==">"): # continue # line: >chr1:5-10 # print(cnt) cnt = cnt + 1 str1 = line[1:] temp1 = str1.split(':') t_chrom = temp1[0] temp2 = temp1[1].split('-') t_start, t_stop = int(temp2[0]), int(temp2[1]) chrom1, start1, stop1, serial1 = chrom[cnt], start[cnt], stop[cnt], serial[cnt] if (chrom1==t_chrom) and (start1==t_start) and (stop1==t_stop): flag = 1 else: b = np.where((chrom==t_chrom)&(start==t_start)&(stop==t_stop))[0] if len(b)>0: cnt = b[0] flag = 1 else: if flag == 1: line = line.strip().upper() # vec = kmer_counting(line,K,kmer_dict1) serial_vec.append([cnt,serial[cnt]]) seq_vec.append(line) GC_profile = countCG(line) GC_profile1 = countCG_N(line) GC_skew = countCG_skew(line) vec = [GC_profile,GC_profile1,GC_skew] # line_list.append(line) # f_list.append(vec) flag = 0 serial_list[cnt,0], serial_list[cnt,1], serial_list[cnt,2] = serial[cnt], len(line), line.count('N') f_list[cnt] = vec # f_mtx[cnt] = one_hot_encoding(line, seq_len) i += 1 if i % 1000 == 0: print("%d of %d\r" %(i,num1), end = "") sys.stdout.flush() # if cnt>1000: # break # b = np.where(serial_list[:,0]>=0)[0] # serial_list, f_list, f_mtx, label, group_label = serial_list[b], f_list[b], f_mtx[b], label[b], group_label[b] # filename1 = '%s.vec'%(species_id) # np.save(filename1,(serial_list,f_list)) serial_vec = np.asarray(serial_vec) fields = ['index','serial','seq'] data1 = pd.DataFrame(columns=fields) data1[fields[0]], data1[fields[1]] = serial_vec[:,0], serial_vec[:,1] data1[fields[2]] = seq_vec # data1.to_csv('test_seq.txt',index=False,sep='\t') data1.to_csv(output_filename,index=False,sep='\t') return serial_list, f_list, label, group_label, signal # feature 1: GC profile # feature 2: GC skew # def load_seq_altfeature(filename2, K, kmer_dict1, sel_idx): def load_seq_altfeature(filename2, sel_idx): file2 = pd.read_csv(filename2,sep='\t') seq = np.asarray(file2['seq']) if len(sel_idx)>0: seq = seq[sel_idx] num1
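# The load_seq_altfeature* readers above assemble a three-value feature vector
# per sequence from the GC helpers defined at the top of this module. A small,
# self-contained sketch of that feature triple; the input sequence is invented.
EPS = 1e-12

def gc_features(seq):
    """[GC content, N-corrected GC content, GC skew] as in countCG / countCG_N / countCG_skew."""
    seq = seq.upper()
    g, c, n = seq.count("G"), seq.count("C"), seq.count("N")
    gc_content   = (g + c) / len(seq)                 # countCG
    gc_content_n = (g + c) / (len(seq) - n + EPS)     # countCG_N: ignore undetermined bases
    gc_skew      = (g - c) / (g + c + EPS)            # countCG_skew
    return [gc_content, gc_content_n, gc_skew]

print(gc_features("AGGGNTTC"))  # ~[0.5, 0.571, 0.5]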
from machaon.core.type import TypeDefinition import os import shutil import sys import tempfile import configparser import re import importlib import traceback from typing import Dict, Any, Union, List, Optional, Iterator from machaon.core.importer import module_loader, walk_modules, module_name_from_path, PyBasicModuleLoader from machaon.milestone import milestone, milestone_msg from machaon.package.repository import RepositoryURLError from machaon.core.docstring import DocStringParser, parse_doc_declaration # class DatabaseNotLoadedError(Exception): pass PACKAGE_TYPE_MODULES = 0x1 PACKAGE_TYPE_DEPENDENCY = 0x2 PACKAGE_TYPE_RESOURCE = 0x3 PACKAGE_TYPE_UNDEFINED = 0x4 PACKAGE_TYPE_SINGLE_MODULE = 0x5 class PACKAGE_LOAD_END: pass # # # class Package(): MODULES = PACKAGE_TYPE_MODULES SINGLE_MODULE = PACKAGE_TYPE_SINGLE_MODULE DEPENDENCY = PACKAGE_TYPE_DEPENDENCY RESOURCE = PACKAGE_TYPE_RESOURCE UNDEFINED = PACKAGE_TYPE_UNDEFINED def __init__(self, name: str, source: Any, type: int = None, module: Optional[str] = None, separate = True, hashval = None, scope = None ): self.name: str = name self.source = source self.separate = separate if type is None: type = PACKAGE_TYPE_MODULES self._type = type if self.is_type_modules(): self.scope = scope or self.name else: self.scope = None self.entrypoint: Optional[str] = module self._hash = hashval self._loaded: List[Exception] = [] self._modules: List[PyBasicModuleLoader] = [] # 読み込み済みモジュール self._extra_reqs: List[str] = [] # 追加の依存パッケージ名 def assign_definition(self, pkg): self.name = pkg.name self.source = pkg.source self.scope = pkg.scope self.separate = pkg.separate self.entrypoint = pkg.entrypoint self._type = pkg._type self._hash = pkg._hash return self @property def source_name(self): if self.source is None: raise ValueError("No source") return self.source.name def get_source_signature(self): if self.source is None: raise ValueError("No source") return self.source.get_source() def get_source(self): return self.source def is_remote_source(self) -> bool: return self.source.is_remote def is_module_source(self) -> bool: return getattr(self.source, "is_module", False) is True def is_installation_separated(self) -> bool: return self.separate def load_latest_hash(self) -> Optional[str]: if self.source is None: return None if self._hash is None: try: _hash = self.source.query_hash() except RepositoryURLError: _hash = None self._hash = "" if _hash is None else _hash return self._hash def is_type_modules(self) -> bool: return self._type == PACKAGE_TYPE_MODULES or self._type == PACKAGE_TYPE_SINGLE_MODULE def is_dependency_modules(self) -> bool: return self._type == PACKAGE_TYPE_DEPENDENCY def is_undefined(self) -> bool: return self._type == PACKAGE_TYPE_UNDEFINED def is_ready(self) -> bool: if self._type == PACKAGE_TYPE_RESOURCE: return False if self.entrypoint is None: raise ValueError("エントリモジュールが指定されていません") # エントリパスの親モジュールから順に確認する mparts = self.entrypoint.split(".") for i in range(len(mparts)): mp = ".".join(mparts[0:i+1]) spec = importlib.util.find_spec(mp) if spec is None: return False return True def check_required_modules_ready(self) -> Dict[str, bool]: """ 依存するmachaonパッケージのロード状況 """ rets = {} for module_name in self._extra_reqs: spec = importlib.util.find_spec(module_name) est = spec is not None rets[module_name] = est return rets def load_module_loaders(self): """ サブモジュールのローダを生成する """ modules = [] if self._type == PACKAGE_TYPE_UNDEFINED: raise PackageLoadError("パッケージの定義がありません") elif self._type not in (PACKAGE_TYPE_MODULES, PACKAGE_TYPE_SINGLE_MODULE, 
PACKAGE_TYPE_DEPENDENCY): return modules # モジュールのdocstringを読みに行く initial_module = module_loader(self.entrypoint) try: initial_module.load_module_declaration() except Exception as e: raise PackageLoadError(type(e).__name__, e) # docstringを解析する self._extra_reqs = initial_module.get_extra_requirements() modules: List[PyBasicModuleLoader] = [] modules.extend(initial_module.get_package_submodules()) # サブモジュールのロード if self._type == PACKAGE_TYPE_MODULES: if not modules: # 全てのサブモジュールを走査する basepkg = initial_module.module_name for pkgpath in initial_module.load_package_directories(): # 開始モジュールのディレクトリから下降する # 再帰を避けるためにスタック上にあるソースファイルパスを調べる skip_names = [] for fr in traceback.extract_stack(): fname = os.path.normpath(fr.filename) if fname.startswith(pkgpath): relname = module_name_from_path(fname, pkgpath, basepkg) skip_names.append(relname) for loader in walk_modules(pkgpath, basepkg): if loader.module_name in skip_names: continue modules.append(loader) elif self._type == PACKAGE_TYPE_SINGLE_MODULE: modules = [initial_module] return modules def load_type_definitions(self) -> Iterator[TypeDefinition]: """ モジュールにあるすべての型定義クラスを得る """ try: modules = self.load_module_loaders() except Exception as e: return self._loadfail(e) if not self.is_type_modules(): return if not modules: return self._loadfail("モジュールを1つも読み込めませんでした") typecount = 0 for modloader in modules: try: for typedef in modloader.scan_type_definitions(): typedef.scope = self.scope yield typedef typecount += 1 except Exception as e: self._loadfail(PackageModuleLoadError(e, str(modloader))) continue self._modules.append(modloader) if typecount == 0: self._loadfail(PackageLoadError("{}個のモジュールの中から型を1つも読み込めませんでした".format(len(modules)))) def get_module_count(self): """ ロードされたモジュールの数を返す """ if not self.once_loaded(): raise ValueError("Not loaded yet") return len(self._modules) # # ロード状態 # def reset_loading(self): """ ロード状態を空にする """ self._loaded.clear() def finish_loading(self): """ ロード終了のフラグをたてる """ self._loaded.append(PACKAGE_LOAD_END) def once_loaded(self): """ ロードが行われたか """ return len(self._loaded) > 0 def _loadfail(self, e): """ 内部で、ロードエラーを記録する Params: e(Exception): 例外オブジェクト """ self._loaded.append(e) def is_load_failed(self): """ ロードが失敗に終わったか """ if not self._loaded: return False # 未ロード時はFalse return self._loaded[0] is not PACKAGE_LOAD_END def is_load_succeeded(self): """ ロードが成功に終わったか """ if not self._loaded: return False # 未ロード時はFalse return self._loaded[0] is PACKAGE_LOAD_END def get_load_errors(self) -> List[Exception]: """ ロードエラーを全て返す """ errs = [] for x in self._loaded: if x is PACKAGE_LOAD_END: break errs.append(x) return errs def get_last_load_error(self) -> Optional[Exception]: """ 最後に起きたロードエラーを返す """ errors = self.get_load_errors() return errors[-1] if errors else None # # # def unload(self, typemodule): """ パッケージの読み込んだ全ての型を削除する """ if self._type == PACKAGE_TYPE_UNDEFINED: raise PackageLoadError("パッケージの定義がありません") if not self._loaded: return if self._type == PACKAGE_TYPE_MODULES: typemodule.remove_scope(self.scope) self._loaded.clear() def create_package(name, package, modules=None, **kwargs): """ 文字列の指定を受けてモジュールパッケージの種類を切り替え、読み込み前のインスタンスを作成する。 """ pkgtype = None if isinstance(package, str): host, sep, desc = package.partition(":") if not sep: raise ValueError("package: '{}' ':'でパッケージの種類を指定してください".format(package)) if host == "github": from machaon.package.repository import GithubRepArchive pkgsource, module = _parse_repository_source(desc, GithubRepArchive) elif host == "bitbucket": from machaon.package.repository import BitbucketRepArchive 
pkgsource, module = _parse_repository_source(desc, BitbucketRepArchive) elif host == "package": from machaon.package.archive import LocalModule module = desc pkgsource = LocalModule(module) elif host == "module": from machaon.package.archive import LocalModule module = desc pkgsource = LocalModule(module) pkgtype = PACKAGE_TYPE_SINGLE_MODULE elif host == "file": from machaon.package.archive import LocalFile pkgsource = LocalFile(desc) pkgtype = PACKAGE_TYPE_SINGLE_MODULE elif host == "package-arc": from machaon.package.archive import LocalArchive pkgsource = LocalArchive(desc) else: raise ValueError("package: '{}' サポートされていないホストです".format(host)) else: pkgsource = package if pkgtype is not None: kwargs["type"] = pkgtype return Package(name, pkgsource, module=module, **kwargs) def _parse_repository_source(src, repository_class): desc, sep, mod = src.rpartition(":") if not sep: desc = src mod = None rep = repository_class(desc) if not mod: mod = rep.name return rep, mod # class PackageNotFoundError(Exception): pass class PackageLoadError(Exception): def __init__(self, s, e=None): super().__init__(s, e) def child_exception(self): return super().args[1] def get_string(self): return super().args[0] class PackageModuleLoadError(Exception): def __init__(self, e, name): super().__init__(e, name) def child_exception(self): return super().args[0] def get_module_name(self): return super().args[1] # # # class PackageManager(): ALREADY_INSTALLED = milestone() DOWNLOAD_START = milestone_msg("total") DOWNLOADING = milestone_msg("size") DOWNLOAD_END = milestone_msg("total") DOWNLOAD_ERROR = milestone_msg("error") EXTRACTED_FILES = milestone_msg("path") NOT_INSTALLED = milestone() UNINSTALLING = milestone() PIP_INSTALLING = milestone() PIP_UNINSTALLING = milestone() PIP_MSG = milestone_msg("msg") PIP_END = milestone_msg("returncode") def __init__(self, directory, databasepath): self.dir = directory self.database = None # type: configparser.ConfigParser self._dbpath = databasepath self.load_database() def add_to_import_path(self): if self.dir not in sys.path: sys.path.insert(0, self.dir) def load_database(self, force=False): if not force and self.database is not None: return if os.path.isfile(self._dbpath): # ファイルを読み込む cfg = configparser.ConfigParser() with open(self._dbpath, "r", encoding="utf-8") as fi: cfg.read_file(fi) self.database = cfg else: # 空データ self.database = configparser.ConfigParser() return True def add_database(self, pkg, toplevel=None, infodir=None): self.check_database() if pkg.name not in self.database: self.database[pkg.name] = {} self.database.set(pkg.name, "source", pkg.get_source_signature()) self.database.set(pkg.name, "hash", pkg.load_latest_hash()) separated = pkg.is_installation_separated() if separated is not None: self.database.set(pkg.name, "separate", str(separated)) if toplevel is not None: self.database.set(pkg.name, "toplevel", toplevel) if infodir is not None: self.database.set(pkg.name, "infodir", infodir) self.save_database() def remove_database(self, name): self.check_database() self.database.remove_section(name) self.save_database() def save_database(self): if self.database is None: raise DatabaseNotLoadedError() if not os.path.isdir(self.dir): os.makedirs(self.dir) with open(self._dbpath, "w", encoding="utf-8") as fo: self.database.write(fo) print("save setting file '{}'".format(self._dbpath)) def check_database(self): if self.database is None: raise DatabaseNotLoadedError() def is_installed(self, pkg): self.check_database() if isinstance(pkg, Package): pkgname = 
pkg.name elif isinstance(pkg, str): pkgname = pkg else: raise TypeError(repr(pkg)) return self.database.has_section(pkgname) # def install(self, pkg: Package, newinstall: bool): if pkg.is_module_source(): # インストールは不要 return tmpdir = '' def cleanup_tmpdir(d): shutil.rmtree(d) return '' rep = pkg.get_source() if rep.is_remote: # ダウンロードする if not tmpdir: tmpdir = tempfile.mkdtemp() try: total = rep.query_download_size() yield PackageManager.DOWNLOAD_START.bind(total=total) arcfilepath = rep.get_arcfilepath(tmpdir) for size in rep.download_iter(arcfilepath): yield PackageManager.DOWNLOADING.bind(size=size, total=total) yield PackageManager.DOWNLOAD_END.bind(total=total) except RepositoryURLError as e: yield PackageManager.DOWNLOAD_ERROR.bind(error=e.get_basic()) tmpdir = cleanup_tmpdir(tmpdir) return except Exception: tmpdir = cleanup_tmpdir(tmpdir) return localpath = None if rep.is_archive: # ローカルに展開する if not tmpdir: tmpdir = tempfile.mkdtemp() try: arcfilepath = rep.get_arcfilepath(tmpdir) out = os.path.join(tmpdir, "content") os.mkdir(out) localpath = rep.extract(arcfilepath, out) except Exception: cleanup_tmpdir(tmpdir) return else: # 単にパスを取得する localpath = rep.get_local_path() # pipにインストールさせる yield PackageManager.PIP_INSTALLING try: if newinstall: yield from _run_pip( installtarget=localpath, installdir=self.dir if pkg.is_installation_separated() else None ) # pipが作成したデータを見に行く distinfo: Dict[str, str] = {} if pkg.is_installation_separated(): distinfo = _read_pip_dist_info(self.dir, pkg.source_name) # データベースに書き込む self.add_database(pkg, **distinfo) else: isseparate = self.database.getboolean(pkg.name, "separate", fallback=False) yield from _run_pip( installtarget=localpath, installdir=self.dir if isseparate else None, options=["--upgrade"] ) # データベースに書き込む self.add_database(pkg) finally: tmpdir = cleanup_tmpdir(tmpdir) # パッケージの情報を修正する
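# Hypothetical usage of the create_package() factory defined earlier in this
# module. The package and module names are invented; only the "host:descriptor"
# source syntax comes from the parser shown above.
pkg_local  = create_package("mytools", "package:mytools_pkg")    # local package of modules
pkg_single = create_package("onefile", "module:mytools_single")  # single local module
# Remote sources use the same syntax, e.g. "github:someuser/somerepo" with an
# optional ":entrymodule" suffix handled by _parse_repository_source().

print(pkg_local.is_type_modules())    # True: PACKAGE_TYPE_MODULES is the default
print(pkg_single.is_type_modules())   # True: PACKAGE_TYPE_SINGLE_MODULE also counts as modules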
<reponame>211tbc/synthesis<filename>src/hmiscsv30writer.py #!/usr/bin/env python import os from sqlalchemy import or_, and_ from zope.interface import implementer import csv from .conf import settings #import exceptions from . import dbobjects from .writer import Writer @implementer(Writer) class HmisCSV30Writer(): # Writer Interface #implements (Writer) ######################## # Constant Definitions # ######################## files = \ { "export" : "Export.csv", "agency" : "AgencyProgram.csv", "siteInfo" : "SiteInformation.csv", "regions" : "Regions.csv", "inventory" : "BedInventory.csv", "client" : "Client.csv", "historical" : "ClientHistorical.csv", "participation" : "ProgramParticipation.csv", "serviceEvent" : "ServiceEvent.csv", "incBens" : "IncomeBenefits.csv" } exportHeader = \ [ "ExportIDStr", "SourceID", "SourceName", "SourceContactFirst", "SourceContactLast", "SourceContactPhone", "SourceContactExtension", "SourceContactEmail", "ExportDate", "ExportPeriodBegin", "ExportPeriodEnd", "ExportHashing", "SoftwareVendor", "SoftwareVersion", "AgencyFile", "BedInventoryFile", "ClientFile", "ClientHistoricalFile", "IncomeBenefitsFile", "OutcomeMeasuresFile", "RegionsFile", "Program_Participation", "ServiceEventFile", "SiteInformationFile", "Delta or Refresh" ] agencyHeader = \ [ "OrganizationID", "OrganizationName", "ProgramID", "ProgramName", "DirectServiceCode", "SiteID", "ProgramTypeCode", "TargetPopulationA", "TargetPopulationB", "TrackingMethod", "GranteeIdentifier", "ReceivesMcKinneyFunding", "DateCreated", "DateUpdated", "ExportIDStr" ] siteInfoHeader = \ [ "OrganizationID", "Setup Site ID", "Address", "City", "State", "Zip Code", "GeographicCode", "SiteServiceType", "HousingType", "DateUpdated", "ExportIDStr" ] regionsHeader = \ [ "OrganizationID", "SiteID", "RegionType", "RegionID", "RegionDescription", "DateUpdated", "ExportIDStr" ] inventoryHeader = \ [ "OrganizationID", "ProgramID", "SiteID", "AssetListID", "AssetListName", "HouseholdType", "BedType", "Availability", "BedInventory", "CHBedInventory", "UnitInventory", "InventoryStartDate", "InventoryEndDate", "HMISParticipatingBeds", "HMISParticipationStartDate", "HMISParticipationEndDate", "DateUpdated", "ExportIDStr" ] clientHeader = \ [ "OrganizationID", "PersonalIdentificationNumber", "LegalFirstName", "LegalMiddleName", "LegalLastName", "LegalSuffix", "SocialSecurityNumber", "SocialSecNumberQualityCode", "DateOfBirth", "DateOfBirthQualityCode", "PrimaryRace", "SecondaryRace", "Ethnicity", "Gender", "DateAdded", "DateUpdated", "UpdateOrDelete", "IdentityVerification", "ReleaseOfInformation", "ExportIDStr" ] historicalHeader = \ [ "PersonalIdentificationNumber", "OrganizationID", "ProgramID", "SiteID", "AssessmentDate", "DateUpdated", "IncomeTotalMonthly", "IncomeLast30Days", "NonCashBenefitsLast30Days", "PhysicalDisability", "ReceivePhysicalDisabilityServices", "HasDevelopmentalDisability", "ReceiveDevelopmentalDisabilityServices", "HasChronicHealthCondition", "ReceiveChronicHealthServices", "HasHIVAIDS", "ReceiveHIVAIDSServices", "HasMentalHealthProblem", "MentalHealthIndefinite", "ReceiveMentalHealthServices", "HasSubstanceAbuseProblem", "SubstanceAbuseIndefinite", "ReceiveSubstanceAbuseServices", "DomesticViolenceSurvivor", "DVOccurred", "CurrentlyEmployed", "HoursWorkedLastWeek", "EmploymentTenure", "LookingForWork", "CurrentlyInSchool", "VocationalTraining", "HighestSchoolLevel", "Degree", "HealthStatus", "PregnancyStatus", "DueDate", "ServiceEra", "MilitaryServiceDuration", "ServedInWarZone", "WarZone", 
"MonthsInWarZone", "ReceivedFire", "MilitaryBranch", "DischargeStatus", "ChildCurrentlyEnrolledInSchool", "ChildSchoolName", "ChildMcKinneyVentoLiaison", "ChildSchoolType", "ChildSchoolLastEnrolledDate", "ChildEnrollmentBarrier", "ExportIDStr" ] participationHeader = \ [ "PersonalIdentificationNumber", "OrganizationID", "ProgramID", "SiteID", "EntryDate", "ExitDate", "DateUpdated", "VeteranStatus", "DisablingCondition", "PriorResidence", "LengthOfStayAtPriorResidence", "ZIPCode", "ZIPQualityCode", "HousingStatusAtEntry", "HousingStatusAtExit", "HouseholdIdentificationNumber", "Destination", "ReasonForLeaving", "RelationshipToHeadOfHousehold", "HUDChronicHomeless", "ExportIDStr" ] serviceEventHeader = \ [ "PersonalIdentificationNumber", "OrganizationID", "ProgramID", "SiteID", "ServiceEventType", "ServiceEventStartDate", "ServiceEventEndDate", "ServiceCode", "ServiceAIRSCode", "IsReferral?", "Quantity/Frequency", "FinancialAssistanceAmount", "FundingCategory", "GrantIDNumber", "IsRecurring", "Period/Interval", "Advance/Arrears", "ContactTime", "ContactSite", "ClientEngaged", "AssetListID", "AssetID", "DomainIDCode", "DateUpdated", "ExportIDStr" ] incBensHeader = \ [ "PersonalIdentificationNumber", "OrganizationID", "ProgramID", "SiteID", "AssessmentDate", "DateUpdated", "IncomeBenefitType", "SourceCode", "SourceOther", "MonthlyAmount", "ExportIDStr" ] def __init__(self, outDirectory, processingOptions, debug=False, debugMessages=None): if settings.DEBUG: print("CSV Files to be created in: %s" % outDirectory) self.outDirectory = outDirectory #self.pickList = Interpretpicklist() self.errorMsgs = [] self.debug = debug print("Setting up dbobjects...") import time startReal = time.time() self.mappedObjects = dbobjects.DB() endReal = time.time() print("dbobjects setup finished after %0.2f real seconds." % (endReal - startReal)) if debug == True: print("Debug switch is: %s" % debug) self.debugMessages = debugMessages self.options = processingOptions self.openFiles = [] ################################### # Miscellaneous Utility Functions # ################################### def startTransaction(self): self.session = self.mappedObjects.session(echo_uow=True) print("Starting transaction...") def commitTransaction(self): self.session.commit() print("Transaction committed.") def openFile(self, fileName): try: filePath = os.path.join(self.outDirectory, fileName) print("Opening CSV output file %s for writing... " % filePath, end=' ') file1 = open(filePath, "wt+") print("opened.") return file1 except: print("Unable to open CSV output file %s for writing!" % filePath) raise def closeCsvFiles(self): print("Closing CSV output files... 
", end=' ') for file1 in self.openFiles: try: file1.close() except: print("Unable to close CSV output file") raise print("all closed.") def outputStr(self, maxlen, str1): try: truncStr = str1[0:maxlen] except: truncStr = None return truncStr def outputInt(self, val): try: num = int(val) except: num = None return num def outputMoney(self, val): try: num = round(val, 2) except: num = None return num def outputDate(self, tsStr): try: dateStr = tsStr.strftime("%m/%d/%Y") except: dateStr = None return dateStr def outputTime(self, tsStr): try: timeStr = tsStr.strftime("%H:%M:%S") except: timeStr = None return timeStr def chooseId(self, val1, val2): if val1 == None: return val2 else: return val1 ########################################## # Database Column-level Access Functions # ########################################## def getHistoryRelatedColumnData(self, phIndex, table, *columns): query = "self.session.query(dbobjects.%s)" % table\ + ".filter(dbobjects.%s.person_historical_index_id == phIndex)" % table\ +".first()" row = eval(query) # TBD: Do we care which row record gets returned? if self.debug: print("\n* %s = %s" % (table, row)) retVal = [] for column in columns: if not row: retVal.append(None) continue try: retVal.append(eval("row.%s" % column)) except: retVal.append(None) if len(retVal) == 1: return retVal[0] else: return tuple(retVal) def getSchoolBarrier(self, cesIndex): barrier = self.session.query(dbobjects.ChildEnrollmentStatusBarrier)\ .filter(dbobjects.ChildEnrollmentStatusBarrier.child_enrollment_status_index_id == cesIndex).first() # TBD: Do we care which zipCode_status record gets returned? if not barrier: return None if self.debug: print("\n* barrier = ", barrier) try: barrierCd = barrier.barrier_code except: barrierCd = None return barrierCd def getRelationshipToHeadData(self, hhId): members = self.session.query(dbobjects.Household, dbobjects.Members)\ .filter(and_(or_(dbobjects.Household.household_id_num == hhId, dbobjects.Household.household_id_str == hhId), dbobjects.Household.id == dbobjects.Members.household_index_id))\ .first() if not members: return None if self.debug: print("\n* members = ", members) try: rel = members.relationship_to_head_of_household except: rel = None return rel def getPriorZipCodeData(self, phIndex): address = self.session.query(dbobjects.PersonAddress)\ .filter(and_(dbobjects.PersonAddress.person_historical_index_id == phIndex, dbobjects.PersonAddress.is_last_permanent_zip == 1)).first() # TBD: Do we care which zipCode_status record gets returned? if not address: return (None, None) if self.debug: print("\n* person_address = ", address) zipCode = None zipQual = None try: zipCode = address.zipcode zipQual = address.zip_quality_code except: pass return (zipCode, zipQual) def getReasonForLeavingData(self, sspIndex): reason = self.session.query(dbobjects.ReasonsForLeaving)\ .filter(dbobjects.ReasonsForLeaving.site_service_participation_index_id == sspIndex)\ .first() # TBD: Do we care which reason_status record gets returned? if not reason: return None if self.debug: print("\n* reasons_for_leaving=", reason) try: reasonCd = reason.reason_for_leaving except ZeroDivisionError: reasonCd = None return reasonCd def getPersonHistoricalIndexData(self, sspIndex): historical = self.session.query(dbobjects.PersonHistorical)\ .filter(dbobjects.PersonHistorical.site_service_index_id == sspIndex).first() # TBD: Do we care which person historical record's index gets returned? 
if not historical: return None if self.debug: print("\n* person_historical=", historical) try: phIndex = historical.id except: phIndex = None return phIndex def getRacesData(self, personIndex): races = self.session.query(dbobjects.Races)\ .filter(dbobjects.Races.person_index_id == personIndex) # TBD: Do we care about which two races get output? primaryRace = None secondaryRace = None try: primaryRace = races[0].race_unhashed secondaryRace = races[1].race_unhashed except: pass return (primaryRace, secondaryRace) def getReleaseGrantedData(self, personIndex): roi = self.session.query(dbobjects.ReleaseOfInformation)\ .filter(dbobjects.ReleaseOfInformation.person_index_id == personIndex)\ .first() if not roi: return None try: releaseGranted = roi.release_granted except: releaseGranted = None return releaseGranted def getReceivesMcKinneyFundingData(self, serviceIndex): funding = self.session.query(dbobjects.FundingSource)\ .filter(dbobjects.FundingSource.service_index_id == serviceIndex).first() if not funding: return None try: receivesMcKinneyFunding = funding.receives_mcKinney_funding except: receivesMcKinneyFunding = None return receivesMcKinneyFunding def getFundingSourceData(self, seIndex): funding = self.session.query(dbobjects.FundingSource)\ .filter(dbobjects.FundingSource.service_event_index_id == seIndex).first() if not funding: return None faAmt = None grantId = None advArrears = None try: faAmt = funding.financial_assistance_amount grantId = funding.federal_cfda_number advArrears = funding.advance_or_arrears except: pass return (faAmt, grantId, advArrears) ####################################### # Database Row-level Access Functions # ####################################### def getNonCashBenefitsData(self, phIndex): print("in gncbd") nonCashBens = self.session.query(dbobjects.NonCashBenefits)\ .filter(dbobjects.NonCashBenefits.person_historical_index_id == phIndex) if not nonCashBens.count(): return for nonCashBen in nonCashBens: try: if self.debug: print("\n* non_cash_benefits=", nonCashBen) yield nonCashBen except: print("Unable to obtain data from non_cash_benefits table!") raise def getIncomeAndSourcesData(self, phIndex): print("in gisd") incomes = self.session.query(dbobjects.IncomeAndSources)\ .filter(dbobjects.IncomeAndSources.person_historical_index_id == phIndex) if not incomes.count(): return for income in incomes: try: if self.debug: print("\n* income_and_sources=", income) yield income except: print("Unable to obtain data from income_and_sources table!") raise def getPersonHistoricalData(self, personIndex, personId): historicals = self.session.query(dbobjects.PersonHistorical)\ .filter(dbobjects.PersonHistorical.person_index_id == personIndex) if not historicals.count(): print("Warning: no data in person_historical table for person %s." \ % personId) return else: self.historicalFile = self.openFile(HmisCSV30Writer.files["historical"]) self.openFiles.append(self.historicalFile) self.historicalWriter = csv.writer(self.historicalFile, quoting=csv.QUOTE_NONNUMERIC) self.historicalWriter.writerow(HmisCSV30Writer.historicalHeader) for historical in historicals: try: if self.debug: print("\n* person_historical=", historical) yield historical except: print("Unable to obtain data from person_historical table!") raise def getServiceEventData(self, personIndex, personId): serviceEvents
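# A condensed sketch of the writer pattern used by HmisCSV30Writer above: open
# one CSV file per section, write its header row with QUOTE_NONNUMERIC quoting,
# then emit rows built from the output* coercion helpers. The header subset and
# the row values here are illustrative only.
import csv
import datetime

def output_date(ts):
    """Format a timestamp as MM/DD/YYYY, or None when the value is missing or invalid."""
    try:
        return ts.strftime("%m/%d/%Y")
    except Exception:
        return None

with open("Client.csv", "w", newline="") as client_file:
    writer = csv.writer(client_file, quoting=csv.QUOTE_NONNUMERIC)
    writer.writerow(["OrganizationID", "PersonalIdentificationNumber", "DateOfBirth"])
    writer.writerow([1, "ABC123", output_date(datetime.date(1980, 1, 31))])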
# coding: utf-8 """ THORChain API This documentation outlines the API for THORChain. NOTE: This document is a **work in progress**. # noqa: E501 OpenAPI spec version: Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import re # noqa: F401 # python 2 and python 3 compatibility library import six from thornode_client.api_client import ApiClient class PoolsApi(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def get_a_liquidity_provider_of_a_pool(self, asset, address, **kwargs): # noqa: E501 """Get a liquidity provider of a pool # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_a_liquidity_provider_of_a_pool(asset, address, async_req=True) >>> result = thread.get() :param async_req bool :param str asset: The asset of the pool (required) :param str address: The address of the liquidity provider (required) :return: object If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_a_liquidity_provider_of_a_pool_with_http_info(asset, address, **kwargs) # noqa: E501 else: (data) = self.get_a_liquidity_provider_of_a_pool_with_http_info(asset, address, **kwargs) # noqa: E501 return data def get_a_liquidity_provider_of_a_pool_with_http_info(self, asset, address, **kwargs): # noqa: E501 """Get a liquidity provider of a pool # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_a_liquidity_provider_of_a_pool_with_http_info(asset, address, async_req=True) >>> result = thread.get() :param async_req bool :param str asset: The asset of the pool (required) :param str address: The address of the liquidity provider (required) :return: object If the method is called asynchronously, returns the request thread. 
""" all_params = ['asset', 'address'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_a_liquidity_provider_of_a_pool" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'asset' is set if self.api_client.client_side_validation and ('asset' not in params or params['asset'] is None): # noqa: E501 raise ValueError("Missing the required parameter `asset` when calling `get_a_liquidity_provider_of_a_pool`") # noqa: E501 # verify the required parameter 'address' is set if self.api_client.client_side_validation and ('address' not in params or params['address'] is None): # noqa: E501 raise ValueError("Missing the required parameter `address` when calling `get_a_liquidity_provider_of_a_pool`") # noqa: E501 collection_formats = {} path_params = {} if 'asset' in params: path_params['asset'] = params['asset'] # noqa: E501 if 'address' in params: path_params['address'] = params['address'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 return self.api_client.call_api( '/thorchain/pool/{asset}/liquidity_provider/{address}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='object', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_a_pool(self, asset, **kwargs): # noqa: E501 """Get a pool # noqa: E501 Retrieve a liquidity pool with the given asset # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_a_pool(asset, async_req=True) >>> result = thread.get() :param async_req bool :param str asset: The asset of the pool (required) :return: object If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_a_pool_with_http_info(asset, **kwargs) # noqa: E501 else: (data) = self.get_a_pool_with_http_info(asset, **kwargs) # noqa: E501 return data def get_a_pool_with_http_info(self, asset, **kwargs): # noqa: E501 """Get a pool # noqa: E501 Retrieve a liquidity pool with the given asset # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_a_pool_with_http_info(asset, async_req=True) >>> result = thread.get() :param async_req bool :param str asset: The asset of the pool (required) :return: object If the method is called asynchronously, returns the request thread. 
""" all_params = ['asset'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_a_pool" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'asset' is set if self.api_client.client_side_validation and ('asset' not in params or params['asset'] is None): # noqa: E501 raise ValueError("Missing the required parameter `asset` when calling `get_a_pool`") # noqa: E501 collection_formats = {} path_params = {} if 'asset' in params: path_params['asset'] = params['asset'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 return self.api_client.call_api( '/thorchain/pool/{asset}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='object', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_all_liquidity_providers_of_a_pool(self, asset, **kwargs): # noqa: E501 """Get all liquidity providers of a pool # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_all_liquidity_providers_of_a_pool(asset, async_req=True) >>> result = thread.get() :param async_req bool :param str asset: The asset of the pool (required) :return: list[object] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_all_liquidity_providers_of_a_pool_with_http_info(asset, **kwargs) # noqa: E501 else: (data) = self.get_all_liquidity_providers_of_a_pool_with_http_info(asset, **kwargs) # noqa: E501 return data def get_all_liquidity_providers_of_a_pool_with_http_info(self, asset, **kwargs): # noqa: E501 """Get all liquidity providers of a pool # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_all_liquidity_providers_of_a_pool_with_http_info(asset, async_req=True) >>> result = thread.get() :param async_req bool :param str asset: The asset of the pool (required) :return: list[object] If the method is called asynchronously, returns the request thread. 
""" all_params = ['asset'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_all_liquidity_providers_of_a_pool" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'asset' is set if self.api_client.client_side_validation and ('asset' not in params or params['asset'] is None): # noqa: E501 raise ValueError("Missing the required parameter `asset` when calling `get_all_liquidity_providers_of_a_pool`") # noqa: E501 collection_formats = {} path_params = {} if 'asset' in params: path_params['asset'] = params['asset'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 return self.api_client.call_api( '/thorchain/pool/{asset}/liquidity_providers', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='list[object]', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_all_the_liquidity_pools(self, **kwargs): # noqa: E501 """Get all the liquidity pools # noqa: E501 Retrieve all the liquidity pools from THORChain # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_all_the_liquidity_pools(async_req=True) >>> result = thread.get() :param async_req bool :return: list[object] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_all_the_liquidity_pools_with_http_info(**kwargs) # noqa: E501 else: (data) = self.get_all_the_liquidity_pools_with_http_info(**kwargs) # noqa: E501 return data def get_all_the_liquidity_pools_with_http_info(self, **kwargs): # noqa: E501 """Get all the liquidity pools # noqa: E501 Retrieve all the liquidity pools from THORChain # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_all_the_liquidity_pools_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :return: list[object] If the method is called asynchronously, returns the request thread. """ all_params = [] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise
<reponame>lpltk/pydriver<filename>pydriver/evaluation/evaluation.py # -*- coding: utf-8 -*- from __future__ import absolute_import, division import math, operator import numpy as np from ..common.constants import FLOAT_dtype class Evaluator(object): """Evaluation curve representation Average precision and orientation similarity measures will compute the area under the monotonically decreasing function of maximum performance w.r.t. increasing minimum recall values. The negative category of hypotheses is processed correctly in the sense that it will be ignored during matching of hypotheses to ground truth. Evaluation is performed under the assumption that the exact positive category does not matter. Create multiple instances for different categories and only supply ground truth / hypotheses of desired category if you want to evaluate a multi-category recognition scenario. """ def __init__(self, minOverlap = 0.5, nPoints = 0, nRecallIntervals = 0): """Initialize Evaluator Overlapping is computed as *intersection(ground truth, hypothesis) / union(ground truth, hypothesis)* considering their 2D bounding boxes. Minimum overlap is the criterion for matching hypotheses to ground truth. The number of individual points will correspond to the number of (different) weights of supplied hypotheses for nPoints<=0 or their minimum weights will be linearly spaced between minimum and maximum weights of those hypotheses for nPoints>0. Minimum recall values for averaging will be drawn from all available points for nRecallIntervals<=0 or will be linearly spaced between 0.0 and 1.0 (e.g. 10 intervals for nRecallIntervals=10: minimum recall = 0.0, 0.1, ..., 0.9, 1.0) for nRecallIntervals>0. :Parameters: minOverlap: float, optional Minimum overlap between ground truth and hypothesis to match them to each other, *0.5* by default nPoints: int, optional Number of individual points to evaluate, *0* by default nRecallIntervals: int, optional Number of intervals for average measures, *0* by default """ # minimal overlap for matching self._minOverlap = minOverlap # number of points self._nPoints = nPoints # number of recall intervals self._nRecallIntervals = nRecallIntervals # undocumented flag for emulating KITTI evaluation, use with caution self._emulateKITTI = False # added frames self._frames = [] # initialize cache self._cache = {} self._resetCache() def addFrame(self, groundTruth, groundTruthOpt, hypotheses): """Add frame for evaluation Function expects everything as list of labels (see :class:`~pydriver.datasets.base.BaseReader` for format description). The cached points will be updated. :Parameters: groundTruth : list Labels with mandatory ground truth groundTruthOpt : list Labels with optional ground truth hypotheses : list Labels with hypotheses, must contain 'info' dictionary with 'weight' key """ frame = { 'gt': groundTruth, 'gtOpt': groundTruthOpt, 'hyp': [h for h in hypotheses if h['category'] != 'negative'], # ignore negative hypotheses } self._frames.append(frame) # update existing points for evPoint in self._cache['points'].values(): evPoint.addFrame(frame['gt'], frame['gtOpt'], frame['hyp']) # update stored weights (set of unique values) for hyp in frame['hyp']: self._cache['weights'].add(hyp['info']['weight']) # reset all other cached values self._resetCache() def getPoint(self, minWeight): """Get :class:`EvaluatorPoint` corresponding to all hypotheses above given minimum weight The result will be cached. 
""" # get all hypotheses weights as sorted NumPy array weights = self._getWeights(nPoints = 0) # find the minimal existing hypothesis weight >= minWeight realMinWeightIndex = np.searchsorted(weights, minWeight) if realMinWeightIndex == weights.shape[0]: # no hypotheses >= minWeight # map all those requests to a point above any possible weight realMinWeight = np.inf else: realMinWeight = weights[realMinWeightIndex] # replace minWeight with realMinWeight # the point: avoid constructing identical points between two hypotheses again and again minWeight = realMinWeight if minWeight not in self._cache['points']: evPoint = EvaluatorPoint(minOverlap = self._minOverlap, minWeight = minWeight) for frame in self._frames: evPoint.addFrame(frame['gt'], frame['gtOpt'], frame['hyp']) self._cache['points'][minWeight] = evPoint return self._cache['points'][minWeight] def getPoints(self): """Get all points according to self._nPoints as list of :class:`EvaluatorPoint`""" if 'getPoints' not in self._cache: # get all desired points (possibly multiple pointers to same instance) points = [self.getPoint(minWeight) for minWeight in self._getWeights(self._nPoints)] # get their sorted unique minimum weights minWeights = sorted(set([point.minWeight for point in points])) # get only unique instances points = [self.getPoint(minWeight) for minWeight in minWeights] # cache the result self._cache['getPoints'] = points return self._cache['getPoints'] def getValues(self): """Get recall, precision and OS values suitable for plotting The function returns a dictionary with keys 'recall', 'precision' and 'OS'. Each of them contains a list with respective values sorted by recall. The produced recall/measure curves are convex. """ valuesRecall = [0] + [recall for recall, length in self._getIntervals(self._nRecallIntervals)] valuesPrecision = [self._getMaxPrecision(recall) for recall in valuesRecall] valuesOS = [self._getMaxOS(recall) for recall in valuesRecall] return {'recall': valuesRecall, 'precision': valuesPrecision, 'OS': valuesOS} @property def aprecision(self): """Average precision""" return self._avalue(self._getMaxPrecision, self._nRecallIntervals) @property def aos(self): """Average orientation similarity""" return self._avalue(self._getMaxOS, self._nRecallIntervals) # --- internal functions --- def _resetCache(self): result = {} # points: cache for EvaluatorPoint objects (minWeight: EvaluatorPoint) if 'points' not in self._cache: result['points'] = {} # create empty points dictionary else: result['points'] = self._cache['points'] # preserve existing points # weights: cache for unique hypotheses weights (set of unique values) if 'weights' not in self._cache: result['weights'] = set() # create empty set of weights else: result['weights'] = self._cache['weights'] # preserve existing weights self._cache = result def _avalue(self, valuefunc, nRecallIntervals): """Average measure""" return sum([valuefunc(minRecall)*length for minRecall,length in self._getIntervals(nRecallIntervals)]) def _getIntervals(self, nRecallIntervals): """Get recall values and the lengths they are covering A value <=0 for nRecallIntervals will use all available recall values from assigned evaluators. 
""" if self._emulateKITTI: # emulate KITTI evaluation, don't pass nRecallIntervalls since KITTI uses exactly 11 return self._getIntervalsKITTI() if nRecallIntervals > 0: # evenly spaced intervals minRecalls = np.linspace(0, 1, nRecallIntervals+1) else: # all unique recall values minRecalls = sorted(set([0] + [evPoint.recall for evPoint in self.getPoints()])) # compute differences between n-th and (n+1)-th recall value lengths = np.diff(minRecalls) # return tuples of recall values and their corresponding lengths (without the first value which covers zero length) return [(minRecalls[i+1], lengths[i]) for i in range(lengths.shape[0])] def _getIntervalsKITTI(self, nRecallIntervals = 11): """Test function for emulating KITTI averaging""" if nRecallIntervals < 1: nRecallIntervals = 11 minRecalls = np.linspace(0, 1, nRecallIntervals) lengths = [1.0 / nRecallIntervals] * nRecallIntervals return [(minRecalls[i], lengths[i]) for i in range(nRecallIntervals)] def _getWeights(self, nPoints = 0): """Get mininum weights for points Get all unique weight values of supplied hypotheses for nPoints<=0 or use linear spacing between minimum and maximum weight otherwise. The result is a NumPy array. """ if 'getWeightsExact' not in self._cache: # convert stored set to sorted NumPy array self._cache['getWeightsExact'] = np.array(sorted(self._cache['weights']), dtype = np.float) weights = self._cache['getWeightsExact'] if nPoints > 0: # create linear spacing if weights.shape[0] < 2: weights = np.linspace(0, 1, nPoints) else: weights = np.linspace(weights.min(), weights.max(), nPoints) return weights def _getMaxPrecision(self, minRecall): """Get maximal precision of evaluators with specified minimum recall""" return max([0] + [evPoint.precision for evPoint in self._getMinRecallEvaluators(minRecall)]) def _getMaxOS(self, minRecall): """Get maximal orientation similarity of evaluators with specified minimum recall""" return max([0] + [evPoint.os for evPoint in self._getMinRecallEvaluators(minRecall)]) def _getMinRecallEvaluators(self, minRecall): """Get evaluators with specified minimum recall""" if 'getMinRecallEvaluators' not in self._cache: self._cache['getMinRecallEvaluators'] = {} if minRecall not in self._cache['getMinRecallEvaluators']: self._cache['getMinRecallEvaluators'][minRecall] = [evPoint for evPoint in self.getPoints() if evPoint.recall >= minRecall] return self._cache['getMinRecallEvaluators'][minRecall] class EvaluatorPoint(object): """Evaluation point representation""" def __init__(self, minOverlap = 0.5, minWeight = 0.0): """Initialize Evaluator instance Overlapping is computed as *intersection(ground truth, hypothesis) / union(ground truth, hypothesis)* considering their 2D bounding boxes. Minimum overlap is the criterion for matching hypotheses to ground truth. 
:Parameters: minOverlap: float, optional Minimum overlap between ground truth and hypothesis to count the latter as true positive, *0.5* by default minWeight: float, optional Dismiss hypotheses with lesser weight, *0.0* by default """ self._minOverlap = minOverlap self._minWeight = minWeight self.TP = 0 # true positives self.FN = 0 # false negatives self.FP = 0 # false positives self._os_sum = 0 # non-normalized orientation similarity @property def minOverlap(self): return self._minOverlap @property def minWeight(self): return self._minWeight @property def objects(self): """Number of ground truth objects (detected and missed)""" return self.TP + self.FN @property def detections(self): """Number of detections (true and false)""" return self.TP + self.FP @property def recall(self): """Recall""" if self.objects > 0: return self.TP / self.objects else: return 0.0 @property def precision(self): """Precision""" if self.detections > 0: return self.TP / self.detections else: return 0.0 @property def os(self): """Normalized orientation similarity""" if self.detections
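# --- Illustrative sketch (separate from the pydriver classes above) --------
# The recall/precision properties and _avalue()/_getIntervals() reduce to a
# little arithmetic. This hedged, self-contained sketch assumes matching has
# already produced TP/FP/FN counts per minimum-weight threshold.
def precision_recall(tp, fp, fn):
    """Recall = TP / (TP + FN), precision = TP / (TP + FP); 0.0 when undefined."""
    objects = tp + fn
    detections = tp + fp
    recall = float(tp) / objects if objects else 0.0
    precision = float(tp) / detections if detections else 0.0
    return recall, precision

def average_precision(points, n_intervals=10):
    """Integrate the maximum precision over evenly spaced minimum-recall
    intervals, mirroring Evaluator._avalue() with nRecallIntervals > 0.

    points: list of (recall, precision) tuples, one per evaluated threshold.
    """
    length = 1.0 / n_intervals
    total = 0.0
    for i in range(1, n_intervals + 1):
        min_recall = i * length
        # maximum precision among all points reaching at least min_recall
        best = max([0.0] + [p for r, p in points if r >= min_recall])
        total += best * length
    return total

# Illustrative counts only, not taken from any dataset.
points = [precision_recall(tp, fp, fn)
          for tp, fp, fn in [(50, 5, 50), (80, 20, 20), (95, 60, 5)]]
print(average_precision(points))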
#!/usr/bin/env python
# coding: utf-8

# In[ ]:


def validate_getConfig_body(rdata):
    """
    Method validates getConfig's body for correct structure and non-emptiness,
    rdata = { "data": [...] }
    INPUTS: rdata as dict(body)
    OUTPUT: Returns boolean, True when structure is valid, False when structure is invalid.
    """
    if isinstance(rdata, dict) is False:
        logging.error("<JSON Body Error: body passed is not a dictionary...>")
        return False
    elif "data" not in rdata:
        logging.error("<JSON Body Error: 'data' key is missing...>")
        return False
    elif isinstance(rdata["data"], list) is False:
        logging.error("<JSON Body Error: 'data' value is not a list...>")
        return False
    elif not rdata["data"]:
        logging.error("<JSON Body Error: 'data' is empty...>")
        return False
    else:
        return True


# In[ ]:


def validate_setConfig_body(rdata):
    """
    Method validates setConfig's body for correct structure and non-emptiness,
    rdata = { "data": {...} }
    INPUTS: rdata as dict(body)
    OUTPUT: Returns boolean, True when structure is valid, False when structure is invalid.
    """
    if isinstance(rdata, dict) is False:
        logging.error("<JSON Body Error: body passed is not a dictionary...>")
        return False
    elif "data" not in rdata:
        logging.error("<JSON Body Error: 'data' key is missing...>")
        return False
    elif isinstance(rdata["data"], dict) is False:
        logging.error("<JSON Body Error: 'data' value is not a dictionary...>")
        return False
    elif not rdata["data"]:
        logging.error("<JSON Body Error: 'data' is empty...>")
        return False
    else:
        return True


# In[ ]:


def getCallConnectionInfo(call, dest, pdmssp=False):
    """
    Method expects the call dict returned by the getCallStatusV2() method.
    Extracts call info - RemotePartyNumber, CallState, CallHandle and LineID - from call if dest matches.
    INPUTS: call as dict(body), dest as string
    OUTPUT: Returns response body as dict when the dest match is successful, None when the match is unsuccessful.
    """
    body_dict = {}
    if not pdmssp:
        for i in range(len(call["data"])):
            # Removes 'sip:' from RemotePartyNumber if present.
            body_dict["RemotePartyNumber"] = re.sub(
                "^sip:", "", call["data"][i]["RemotePartyNumber"]
            )
            if body_dict["RemotePartyNumber"] == dest:
                body_dict["CallState"] = call["data"][i]["CallState"]
                body_dict["CallHandle"] = call["data"][i]["CallHandle"]
                body_dict["LineID"] = call["data"][i]["LineID"]
    elif pdmssp:
        for i in range(len(call["data"]["body"]["data"])):
            # Removes 'sip:' from RemotePartyNumber if present.
            body_dict["RemotePartyNumber"] = re.sub(
                "^sip:", "", call["data"]["body"]["data"][i]["RemotePartyNumber"]
            )
            if body_dict["RemotePartyNumber"] == dest:
                body_dict["CallState"] = call["data"]["body"]["data"][i]["CallState"]
                body_dict["CallHandle"] = call["data"]["body"]["data"][i]["CallHandle"]
                body_dict["LineID"] = call["data"]["body"]["data"][i]["LineID"]
    return body_dict


# In[ ]:


import datetime
import json
import logging
import re
import requests
import time
import urllib3

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)


class vvx():
    """
    Class for VVX based on UCS 6.1.0 REST APIs.
INPUTS for instance creation: - ipaddr = IP address of phone, auth = username and password in tuple OR - pdmssp = True, macaddr = mac address of phone, pdmssp_credentials = { 'client_id', 'client_secret', 'org_id'} Attributes: .ipaddr .phone_credentials .use_https .verify_secure .model .firmware .macaddress .lines .linescount .linestates Additional PDMS-SP Attributes: .client_id .client_secret .org_id .token .device_id .obi_number Methods: .pdmssp_getToken() - calls PDMS-SP to generate & retrieve authorization access_token .pdmssp_getDeviceId() - calls PDMS-SP to retrieve DeviceID of device on PDMS-SP .getDeviceInfoV2() - calls "/api/v2/mgmt/device/info" .getNetwork() - calls "/api/v1/mgmt/network/info" .getLineInfoV2() - calls "/api/v2/mgmt/lineInfo" .getCallStatusV2() - calls "/api/v2/webCallControl/callStatus" .getRunningConfig() - calls "/api/v1/mgmt/device/runningConfig" .getDeviceStats() - calls "/api/v1/mgmt/device/stats" .getNetworkStats() - calls "/api/v1/mgmt/network/stats" .getSessionStats() - calls "/api/v1/mgmt/media/sessionStats" .getCallLogs() - calls "/api/v1/mgmt/callLogs" .getConfig() - calls "/api/v1/mgmt/config/get" .setConfig() - calls "/api/v1/mgmt/config/set" .callDial() - "/api/v1/callctrl/dial" .callEnd() - "/api/v1/callctrl/endCall" .callMute() - "/api/v1/callctrl/mute" .sendDTMF() - "/api/v1/callctrl/sendDTMF" .callAnswer() - "/api/v1/callctrl/answerCall" .callIgnore() - "/api/v1/callctrl/ignoreCall" .callReject() - "/api/v1/callctrl/rejectCall" .callHold() - "/api/v1/callctrl/holdCall" .callResume() - "/api/v1/callctrl/resumeCall" .simulateKeyEvent() - calls "/api/v1/mgmt/simulateKeyEvent" .simulateTextInput() - calls "/api/v1/mgmt/simulateTextInput" .safeRestart() - calls "/api/v1/mgmt/safeRestart" .safeReboot() - calls "/api/v1/mgmt/safeReboot" .factoryReset() - calls "/api/v1/mgmt/factoryReset" .updateConfig() - calls "/api/v1/mgmt/updateConfiguration" .resetConfig() - calls "/api/v1/mgmt/configReset" """ _qpaths_dict={ "sipStatus" : "/api/v1/webCallControl/sipStatus", "network" : "/api/v1/mgmt/network/info", "deviceinfov2" : "/api/v2/mgmt/device/info", "lineinfov2" : "/api/v2/mgmt/lineInfo", "runningConfig" : "/api/v1/mgmt/device/runningConfig", "getconfig" : "/api/v1/mgmt/config/get", "setconfig" : "/api/v1/mgmt/config/set", "simulateTextInput" : "/api/v1/mgmt/simulateTextInput", "simulateKeyEvent" : "/api/v1/mgmt/simulateKeyEvent", "callstatusv2" : "/api/v2/webCallControl/callStatus", "calldial" : "/api/v1/callctrl/dial", "callend" : "/api/v1/callctrl/endCall", "callmute" : "/api/v1/callctrl/mute", "sendDTMF" : "/api/v1/callctrl/sendDTMF", "callresume" : "/api/v1/callctrl/resumeCall", "callanswer" : "/api/v1/callctrl/answerCall", "callignore" : "/api/v1/callctrl/ignoreCall", "callreject" : "/api/v1/callctrl/rejectCall", "callhold" : "/api/v1/callctrl/holdCall", "callresume" : "/api/v1/callctrl/resumeCall", "devicestats" : "/api/v1/mgmt/device/stats", "networkstats" : "/api/v1/mgmt/network/stats", "sessionStats" : "/api/v1/mgmt/media/sessionStats", "callLogs" : "/api/v1/mgmt/callLogs", "callLogs_missed" : "/api/v1/mgmt/callLogs/missed", "callLogs_received" : "/api/v1/mgmt/callLogs/received", "callLogs_placed" : "/api/v1/mgmt/callLogs/placed", "safeRestart" : "/api/v1/mgmt/safeRestart", "safeReboot" : "/api/v1/mgmt/safeReboot", "factoryReset" : "/api/v1/mgmt/factoryReset", "updateConfig" : "/api/v1/mgmt/updateConfiguration", "resetConfig" : "/api/v1/mgmt/configReset", "resetConfig_cloud" : "/api/v1/mgmt/configReset/cloud", "resetConfig_local" : 
"/api/v1/mgmt/configReset/local", "resetConfig_web" : "/api/v1/mgmt/configReset/web", "resetConfig_device" : "/api/v1/mgmt/configReset/device" } _pdmssp_baseurl="https://pcs-api-na.obitalk.com" _access_token_path = "/api/v2/oauth/client_credential/accesstoken" _domain_path = "/api/v2/domain/" _valid_versions = ("6.0.0", "6.1.0",) def __init__(self, ipaddr="", phone_credentials=(), use_https=True, verify_secure=False, pdmssp=False, pdmssp_credentials={}, macaddr="", loglevel="INFO"): logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', datefmt='%Y-%m-%d %H:%M:%S') level = logging.getLevelName(loglevel) logger = logging.getLogger() logger.setLevel(level) # initiates requests.Session() vvx_adapter = requests.adapters.HTTPAdapter(max_retries=3) self.__session = requests.Session() if not pdmssp: # direct phone query scenario self.ipaddr = ipaddr self.phone_credentials = phone_credentials self.use_https = use_https self.verify_secure = verify_secure self.macaddress = macaddr self.model = None self.firmware = None self._swVer = None self.lines = {} self.linestates = {} self.linescount = None if self.use_https: # apply HTTPAdapter settings to url self.__session.mount(f"https://{self.ipaddr}", vvx_adapter) else: # apply HTTPAdapter settings to url self.__session.mount(f"http://{self.ipaddr}", vvx_adapter) try: # Extracts attributes' values for model, firmware, macaddress and _swVer dev = self.getDeviceInfoV2() if dev: self.model = dev["data"]["ModelNumber"] self.firmware = dev["data"]["Firmware"]["Application"] self.macaddress = dev["data"]["MACAddress"] for item in self._valid_versions: if self.firmware.startswith(item): self._swVer = item # *** setConfig/getConfig not working on PDMSSP, skipping altogther *** # Extracts attribute's value for baseprofile #dev = self.getConfig({ "data" : [ "device.baseProfile" ] }) #if dev != None: # self.baseprofile = dev["data"]["device.baseProfile"]["Value"] #else: # self.baseprofile = None # Extracts information about lines, count and state. 
dev = self.getLineInfoV2() if dev: self.linescount = len(dev["data"]) for i in range(self.linescount): if dev["data"][i]["CallServers"]: p = i self.lines[i+1] = dev["data"][i]["Label"] self.linestates[dev["data"][i]["Label"]] = dev["data"][i]["RegistrationStatus"] else: self.lines[i+1] = dev["data"][p]["Label"] self.linestates[dev["data"][p]["Label"]] = dev["data"][i]["RegistrationStatus"] except Exception: pass logging.debug(f"Device ip-address: {self.ipaddr}") logging.debug(f"Device model: {self.model}") logging.debug(f"Device firmware: {self.firmware}") logging.debug(f"Device mac address: {self.macaddress}") # *** setConfig/getConfig not working on PDMSSP*** #logging.debug(f"Device base profile: {self.baseprofile}") logging.debug(f"Device lines: {self.lines}") logging.debug(f"Device lines count: {self.linescount}") logging.debug(f"Device lines states: {self.linestates}") elif pdmssp: # pdmssp query scenario self.ipaddr = ipaddr self.phone_credentials = phone_credentials self.use_https = use_https self.verify_secure = verify_secure self.macaddress = macaddr self.model = None self.firmware = None self._swVer = None self.lines = {} self.linestates = {} self.linescount = None self.token = None self.device_id = None self.obi_number = None self.client_id = pdmssp_credentials["client_id"] self.client_secret = pdmssp_credentials["client_secret"] self.org_id = pdmssp_credentials["org_id"] # apply HTTPAdapter settings to url self.__session.mount(self._pdmssp_baseurl, vvx_adapter) try: # get access_token from pdmssp using Org's client_id & client_secret. dev = self.pdmssp_getToken() if dev: self.token = dev["access_token"] # get device_id from pdmssp using access_token, org_id & macaddr dev = self.pdmssp_getDeviceId() if dev["data"]: self.device_id = dev["data"][0]["id"] self.obi_number = dev["data"][0]["obiNumber"] dev = self.getDeviceInfoV2(True) if dev["data"]: self.model = dev["data"]["body"]["data"]["ModelNumber"] self.firmware = dev["data"]["body"]["data"]["Firmware"]["Application"] for item in self._valid_versions: if self.firmware.startswith(item): self._swVer = item # *** setConfig/getConfig not working on PDMSSP, skipping altogther *** # Extracts attribute's value for baseprofile #dev = self.getConfig({ "data" : [ "device.baseProfile" ] }, True) #if dev != None: # self.baseprofile = dev["data"]["device.baseProfile"]["Value"] #else: # self.baseprofile = None # Extracts information about lines, count and state. 
dev = self.getLineInfoV2(True) if dev["data"]: self.linescount = len(dev["data"]["body"]["data"]) for i in range(self.linescount): if dev["data"]["body"]["data"][i]["CallServers"]: p = i self.lines[i+1] = dev["data"]["body"]["data"][i]["Label"] self.linestates[dev["data"]["body"]["data"][i]["Label"]] = dev["data"]["body"]["data"][i]["RegistrationStatus"] else: self.lines[i+1] = dev["data"]["body"]["data"][p]["Label"] self.linestates[dev["data"]["body"]["data"][p]["Label"]] = dev["data"]["body"]["data"][i]["RegistrationStatus"] except Exception: pass logging.debug(f"Device id of device on PDMS-SP: {self.device_id}") logging.debug(f"Device obi-number of device on PDMS-SP: {self.obi_number}") logging.debug(f"Device model on PDMS-SP: {self.model}") logging.debug(f"Device firmware on PDMS-SP: {self.firmware}") logging.debug(f"Device mac address on PDMS-SP: {self.macaddress}") # *** setConfig/getConfig not working on PDMSSP*** #logging.debug(f"Device base profile: {self.baseprofile}") logging.debug(f"Device lines: {self.lines}") logging.debug(f"Device lines count: {self.linescount}") logging.debug(f"Device lines states: {self.linestates}") def __httpRequest(self, qpath="", rtype="GET", params={}, headers={}, rdata={}, pdmssp=False, pdmssp_url=""): """ Method makes HTTP request using requests. INPUTS: qpath as string (qpath is key value taken from qpaths_dict, meant for direct phone api call), rtype as string(supported - GET, POST), params as dict, headers as dict, rdata as dict, pdmssp as boolean, pdmssp_url as str OUTPUT: Response JSON object from requests.get() when successful, None when unsuccessful. """ s = self.__session if not pdmssp: if self.use_https: target_url = f"https://{self.ipaddr}{self._qpaths_dict[qpath]}" else: target_url = f"http://{self.ipaddr}{self._qpaths_dict[qpath]}" try: if not pdmssp: if rtype == "GET": r = s.get(url=target_url, auth=self.phone_credentials, verify=self.verify_secure, timeout=(30, 30)) r.raise_for_status() elif rtype == "POST": r = s.post(url=target_url, params=params, headers=headers, data=json.dumps(rdata),
transaction and returns the result. Beware: lists of (small) integers may be (falsely) returned as a string - use str_to_list() to convert such strings. """ result = self.req_list(self.new_req_list().add_read(key))[0] return self.process_result_read(result) def write(self, key, value): """ Issues a write operation to scalaris and adds it to the current transaction. """ result = self.req_list(self.new_req_list().add_write(key, value))[0] self.process_result_write(result) def add_del_on_list(self, key, to_add, to_remove): """ Issues a add_del_on_list operation to scalaris and adds it to the current transaction. Changes the list stored at the given key, i.e. first adds all items in to_add then removes all items in to_remove. Both, to_add and to_remove, must be lists. Assumes en empty list if no value exists at key. """ result = self.req_list(self.new_req_list().add_add_del_on_list(key, to_add, to_remove))[0] self.process_result_add_del_on_list(result) def add_on_nr(self, key, to_add): """ Issues a add_on_nr operation to scalaris and adds it to the current transaction. Changes the number stored at the given key, i.e. adds some value. Assumes 0 if no value exists at key. """ result = self.req_list(self.new_req_list().add_add_on_nr(key, to_add))[0] self.process_result_add_on_nr(result) def test_and_set(self, key, old_value, new_value): """ Issues a test_and_set operation to scalaris and adds it to the current transaction. Atomic test and set, i.e. if the old value at key is old_value, then write new_value. """ result = self.req_list(self.new_req_list().add_test_and_set(key, old_value, new_value))[0] self.process_result_test_and_set(result) def nop(self, value): """ No operation (may be used for measuring the JSON overhead). """ value = self._conn.encode_value(value) result = self._conn.callp('/api/tx.yaws', 'nop', [value]) self._conn.process_result_nop(result) def close_connection(self): """ Close the connection to scalaris (it will automatically be re-opened on the next request). """ self._conn.close() class _JSONReqList(object): """ Generic request list. """ def __init__(self, other = None): """ Create a new object using a JSON connection. """ self._requests = [] self._is_commit = False if other is not None: self.extend(other) def add_read(self, key): """ Adds a read operation to the request list. """ if (self._is_commit): raise RuntimeError("No further request supported after a commit!") self._requests.append({'read': key}) return self def add_write(self, key, value): """ Adds a write operation to the request list. """ if (self._is_commit): raise RuntimeError("No further request supported after a commit!") self._requests.append({'write': {key: JSONConnection.encode_value(value)}}) return self def add_add_del_on_list(self, key, to_add, to_remove): """ Adds a add_del_on_list operation to the request list. """ if (self._is_commit): raise RuntimeError("No further request supported after a commit!") self._requests.append({'add_del_on_list': {'key': key, 'add': to_add, 'del': to_remove}}) return self def add_add_on_nr(self, key, to_add): """ Adds a add_on_nr operation to the request list. """ if (self._is_commit): raise RuntimeError("No further request supported after a commit!") self._requests.append({'add_on_nr': {key: to_add}}) return self def add_test_and_set(self, key, old_value, new_value): """ Adds a test_and_set operation to the request list. 
""" if (self._is_commit): raise RuntimeError("No further request supported after a commit!") self._requests.append({'test_and_set': {'key': key, 'old': old_value, 'new': new_value}}) return self def add_commit(self): """ Adds a commit operation to the request list. """ if (self._is_commit): raise RuntimeError("Only one commit per request list allowed!") self._requests.append({'commit': ''}) self._is_commit = True return self def get_requests(self): """ Gets the collected requests. """ return self._requests def is_commit(self): """ Returns whether the transactions contains a commit or not. """ return self._is_commit def is_empty(self): """ Checks whether the request list is empty. """ return self._requests == [] def size(self): """ Gets the number of requests in the list. """ return len(self._requests) def extend(self, other): """ Adds all requests of the other request list to the end of this list. """ self._requests.extend(other._requests) return self class _JSONReqListTransaction(_JSONReqList): """ Request list for use with Transaction.req_list(). """ def __init__(self, other = None): _JSONReqList.__init__(self, other) class _JSONReqListTransactionSingleOp(_JSONReqList): """ Request list for use with TransactionSingleOp.req_list() which does not support commits. """ def __init__(self, other = None): _JSONReqList.__init__(self, other) def add_commit(self): """ Adds a commit operation to the request list. """ raise RuntimeError("No commit allowed in TransactionSingleOp.req_list()!") class PubSub(object): """ Publish and subscribe methods accessing scalaris' pubsub system """ def __init__(self, conn = None): """ Create a new object using the given connection. """ if conn is None: conn = JSONConnection() self._conn = conn def publish(self, topic, content): """ Publishes content under topic. """ # note: do NOT encode the content, this is not decoded on the erlang side! # (only strings are allowed anyway) # content = self._conn.encode_value(content) result = self._conn.callp('/api/pubsub.yaws', 'publish', [topic, content]) self._conn.process_result_publish(result) def subscribe(self, topic, url): """ Subscribes url for topic. """ # note: do NOT encode the URL, this is not decoded on the erlang side! # (only strings are allowed anyway) # url = self._conn.encode_value(url) result = self._conn.callp('/api/pubsub.yaws', 'subscribe', [topic, url]) self._conn.process_result_subscribe(result) def unsubscribe(self, topic, url): """ Unsubscribes url from topic. """ # note: do NOT encode the URL, this is not decoded on the erlang side! # (only strings are allowed anyway) # url = self._conn.encode_value(url) result = self._conn.callp('/api/pubsub.yaws', 'unsubscribe', [topic, url]) self._conn.process_result_unsubscribe(result) def get_subscribers(self, topic): """ Gets the list of all subscribers to topic. """ result = self._conn.callp('/api/pubsub.yaws', 'get_subscribers', [topic]) return self._conn.process_result_get_subscribers(result) def nop(self, value): """ No operation (may be used for measuring the JSON overhead). """ value = self._conn.encode_value(value) result = self._conn.callp('/api/pubsub.yaws', 'nop', [value]) self._conn.process_result_nop(result) def close_connection(self): """ Close the connection to scalaris (it will automatically be re-opened on the next request). """ self._conn.close() class ReplicatedDHT(object): """ Non-transactional operations on the replicated DHT of scalaris """ def __init__(self, conn = None): """ Create a new object using the given connection. 
""" if conn is None: conn = JSONConnection() self._conn = conn # returns the number of successfully deleted items # use get_last_delete_result() to get more details def delete(self, key, timeout = 2000): """ Tries to delete the value at the given key. WARNING: This function can lead to inconsistent data (e.g. deleted items can re-appear). Also when re-creating an item the version before the delete can re-appear. """ result = self._conn.callp('/api/rdht.yaws', 'delete', [key, timeout]) (success, ok, results) = self._conn.process_result_delete(result) self._lastDeleteResult = results if success == True: return ok elif success == 'timeout': raise TimeoutError(result) else: raise UnknownError(result) def get_last_delete_result(self): """ Returns the result of the last call to delete(). NOTE: This function traverses the result list returned by scalaris and therefore takes some time to process. It is advised to store the returned result object once generated. """ return self._conn.create_delete_result(self._lastDeleteResult) def nop(self, value): """ No operation (may be used for measuring the JSON overhead). """ value = self._conn.encode_value(value) result = self._conn.callp('/api/rdht.yaws', 'nop', [value]) self._conn.process_result_nop(result) def close_connection(self): """ Close the connection to scalaris (it will automatically be re-opened on the next request). """ self._conn.close() class ScalarisVM(object): """ Provides methods to interact with a specific Scalaris (Erlang) VM. """ class GetInfoResult(object): def __init__(self, scalarisVersion, erlangVersion, memTotal, uptime, erlangNode, ip, port, yawsPort): self.scalarisVersion = scalarisVersion self.erlangVersion = erlangVersion self.memTotal = memTotal self.uptime = uptime self.erlangNode = erlangNode self.ip = ip self.port = port self.yawsPort = yawsPort def __init__(self, conn = None): """ Create a new object using the given connection. """ if conn is None: conn = JSONConnection() self._conn = conn def getVersion(self): """ Gets the version of the Scalaris VM of the current connection. """ result = self._conn.callp('/api/vm.yaws', 'get_version', []) return self._conn.process_result_vm_get_version(result) def getInfo(self): """ Gets some information about the VM and Scalaris. """ result = self._conn.callp('/api/vm.yaws', 'get_info', []) return self._conn.process_result_vm_get_info(result) def getNumberOfNodes(self): """ Gets the number of nodes in the Scalaris VM of the current connection. """ result = self._conn.callp('/api/vm.yaws', 'number_of_nodes', []) return self._conn.process_result_vm_get_number_of_nodes(result) def getNodes(self): """ Gets the names of the nodes in the Scalaris VM of the current connection. """ result = self._conn.callp('/api/vm.yaws', 'get_nodes', []) return self._conn.process_result_vm_get_nodes(result) def addNodes(self, number): """ Adds Scalaris nodes to the Scalaris VM of the current connection. """ result = self._conn.callp('/api/vm.yaws', 'add_nodes', [number]) return self._conn.process_result_vm_add_nodes(result) def shutdownNode(self, name): """ Shuts down the given node (graceful leave) in the Scalaris VM of the current connection. """ result = self._conn.callp('/api/vm.yaws', 'shutdown_node', [name]) return self._conn.process_result_vm_delete_node(result) def killNode(self, name): """ Kills the given node in the Scalaris VM of the current connection.
buf def _decryptAndUnseal(self, header, buf): """Decrypt AEAD encrypted data""" seqnumBytes = self._readState.getSeqNumBytes() # AES-GCM has an explicit variable nonce in TLS 1.2 if "aes" in self._readState.encContext.name and \ not self._is_tls13_plus(): explicitNonceLength = 8 if explicitNonceLength > len(buf): #Publicly invalid. raise TLSBadRecordMAC("Truncated nonce") nonce = self._readState.fixedNonce + buf[:explicitNonceLength] buf = buf[8:] else: # for TLS 1.3 and Chacha20 in TLS 1.2 share nonce generation # algorithm nonce = self._getNonce(self._readState, seqnumBytes) if self._readState.encContext.tagLength > len(buf): #Publicly invalid. raise TLSBadRecordMAC("Truncated tag") if not self._is_tls13_plus(): plaintextLen = len(buf) - self._readState.encContext.tagLength authData = seqnumBytes + bytearray([header.type, self.version[0], self.version[1], plaintextLen//256, plaintextLen%256]) else: # TLS 1.3 # enforce the checks for encrypted records if header.type != ContentType.application_data: raise TLSUnexpectedMessage( "Invalid ContentType for encrypted record: {0}" .format(ContentType.toStr(header.type))) if header.version != (3, 3): raise TLSIllegalParameterException( "Unexpected version in encrypted record: {0}" .format(header.version)) if header.length != len(buf): raise TLSBadRecordMAC("Length mismatch") authData = header.write() buf = self._readState.encContext.open(nonce, buf, authData) if buf is None: raise TLSBadRecordMAC("Invalid tag, decryption failure") return buf def _decryptSSL2(self, data, padding): """Decrypt SSL2 encrypted data""" # sequence numbers are incremented for plaintext records too seqnumBytes = self._readState.getSeqNumBytes() # # decrypt # if self._readState.encContext: if self._readState.encContext.isBlockCipher: blockLength = self._readState.encContext.block_size if len(data) % blockLength: raise TLSDecryptionFailed() data = self._readState.encContext.decrypt(data) # # strip and check MAC # if self._readState.macContext: macBytes = data[:16] data = data[16:] mac = self._readState.macContext.copy() mac.update(compatHMAC(data)) mac.update(compatHMAC(seqnumBytes[-4:])) calcMac = bytearray(mac.digest()) if macBytes != calcMac: raise TLSBadRecordMAC() # # strip padding # if padding: data = data[:-padding] return data @staticmethod def _tls13_de_pad(data): """ Remove the padding and extract content type from TLSInnerPlaintext. 
:param bytearray data: decrypted plaintext TLS 1.3 record payload (the serialised TLSInnerPlaintext data structure) :rtype: tuple """ # the padding is at the end and the first non-zero byte is the # padding # could be reversed(enumerate(data)), if that worked at all # could be reversed(list(enumerate(data))), if that didn't double # memory usage for pos, value in izip(reversed(xrange(len(data))), reversed(data)): if value != 0: break else: raise TLSUnexpectedMessage("Malformed record layer inner plaintext" " - content type missing") return data[:pos], value def recvRecord(self): """ Read, decrypt and check integrity of a single record :rtype: tuple :returns: message header and decrypted message payload :raises TLSDecryptionFailed: when decryption of data failed :raises TLSBadRecordMAC: when record has bad MAC or padding :raises socket.error: when reading from socket was unsuccessful :raises TLSRecordOverflow: when the received record was longer than allowed by negotiated version of TLS """ while True: result = None for result in self._recordSocket.recv(): if result in (0, 1): yield result else: break assert result is not None (header, data) = result # as trying decryption increments sequence number, we need to # keep the old one (we do copy of the whole object in case # some cipher has an internal state itself) read_state_copy = None if self.early_data_ok: # do the copy only when needed read_state_copy = copy.copy(self._readState) try: if isinstance(header, RecordHeader2): data = self._decryptSSL2(data, header.padding) if self.handshake_finished: header.type = ContentType.application_data # in TLS 1.3, the other party may send an unprotected CCS # message at any point in connection elif self._is_tls13_plus() and \ header.type == ContentType.change_cipher_spec: pass elif self._readState and \ self._readState.encContext and \ self._readState.encContext.isAEAD: data = self._decryptAndUnseal(header, data) elif self._readState and self._readState.encryptThenMAC: data = self._macThenDecrypt(header.type, data) elif self._readState and \ self._readState.encContext and \ self._readState.encContext.isBlockCipher: data = self._decryptThenMAC(header.type, data) else: data = self._decryptStreamThenMAC(header.type, data) # if we don't have an encryption context established # and early data is ok, that means we have received # encrypted record in case the type of record is # application_data (from TLS 1.3) if not self._readState.encContext \ and not self._readState.macContext \ and self.early_data_ok and \ header.type == ContentType.application_data: raise TLSBadRecordMAC("early data received") except TLSBadRecordMAC: if self.early_data_ok and ( self._early_data_processed + len(data) < self.max_early_data): # ignore exception, retry reading self._early_data_processed += len(data) # reload state for decryption self._readState = read_state_copy continue raise # as soon as we're able to decrypt messages again, we must # start checking the MACs self.early_data_ok = False # TLS 1.3 encrypts the type, CCS is not encrypted if self._is_tls13_plus() and self._readState and \ self._readState.encContext and\ header.type != ContentType.change_cipher_spec: # check if plaintext is not too big, RFC 8446, section 5.4 if len(data) > self.recv_record_limit + 1: raise TLSRecordOverflow() data, contentType = self._tls13_de_pad(data) header = RecordHeader3().create((3, 4), contentType, len(data)) # RFC 5246, section 6.2.1 if len(data) > self.recv_record_limit: raise TLSRecordOverflow() yield (header, Parser(data)) # # 
cryptography state methods # def changeWriteState(self): """ Change the cipher state to the pending one for write operations. This should be done only once after a call to :py:meth:`calcPendingStates` was performed and directly after sending a :py:class:`ChangeCipherSpec` message. """ if self.version in ((0, 2), (2, 0)): # in SSLv2 sequence numbers carry over from plaintext to encrypted # context self._pendingWriteState.seqnum = self._writeState.seqnum self._writeState = self._pendingWriteState self._pendingWriteState = ConnectionState() def changeReadState(self): """ Change the cipher state to the pending one for read operations. This should be done only once after a call to :py:meth:`calcPendingStates` was performed and directly after receiving a :py:class:`ChangeCipherSpec` message. """ if self.version in ((0, 2), (2, 0)): # in SSLv2 sequence numbers carry over from plaintext to encrypted # context self._pendingReadState.seqnum = self._readState.seqnum self._readState = self._pendingReadState self._pendingReadState = ConnectionState() @staticmethod def _getCipherSettings(cipherSuite): """Get the settings for cipher suite used""" if cipherSuite in CipherSuite.aes256GcmSuites: keyLength = 32 ivLength = 4 createCipherFunc = createAESGCM elif cipherSuite in CipherSuite.aes128GcmSuites: keyLength = 16 ivLength = 4 createCipherFunc = createAESGCM elif cipherSuite in CipherSuite.aes256Ccm_8Suites: keyLength = 32 ivLength = 4 createCipherFunc = createAESCCM_8 elif cipherSuite in CipherSuite.aes256CcmSuites: keyLength = 32 ivLength = 4 createCipherFunc = createAESCCM elif cipherSuite in CipherSuite.aes128Ccm_8Suites: keyLength = 16 ivLength = 4 createCipherFunc = createAESCCM_8 elif cipherSuite in CipherSuite.aes128CcmSuites: keyLength = 16 ivLength = 4 createCipherFunc = createAESCCM elif cipherSuite in CipherSuite.chacha20Suites: keyLength = 32 ivLength = 12 createCipherFunc = createCHACHA20 elif cipherSuite in CipherSuite.chacha20draft00Suites: keyLength = 32 ivLength = 4 createCipherFunc = createCHACHA20 elif cipherSuite in CipherSuite.aes128Suites: keyLength = 16 ivLength = 16 createCipherFunc = createAES elif cipherSuite in CipherSuite.aes256Suites: keyLength = 32 ivLength = 16 createCipherFunc = createAES elif cipherSuite in CipherSuite.rc4Suites: keyLength = 16 ivLength = 0 createCipherFunc = createRC4 elif cipherSuite in CipherSuite.tripleDESSuites: keyLength = 24 ivLength = 8 createCipherFunc = createTripleDES elif cipherSuite in CipherSuite.nullSuites: keyLength = 0 ivLength = 0 createCipherFunc = None else: raise AssertionError() return (keyLength, ivLength, createCipherFunc) @staticmethod def _getMacSettings(cipherSuite): """Get settings for HMAC used""" if cipherSuite in CipherSuite.aeadSuites: macLength = 0 digestmod = None elif cipherSuite in CipherSuite.shaSuites: macLength = 20 digestmod = hashlib.sha1 elif cipherSuite in CipherSuite.sha256Suites: macLength = 32 digestmod = hashlib.sha256 elif cipherSuite in CipherSuite.sha384Suites: macLength = 48 digestmod = hashlib.sha384 elif cipherSuite in CipherSuite.md5Suites: macLength = 16 digestmod = hashlib.md5 else: raise AssertionError() return macLength, digestmod @staticmethod def _getHMACMethod(version): """Get the HMAC method""" assert version in ((3, 0), (3, 1), (3, 2), (3, 3)) if version == (3, 0): createMACFunc = createMAC_SSL elif version in ((3, 1), (3, 2), (3, 3)): createMACFunc = createHMAC return createMACFunc def calcSSL2PendingStates(self, cipherSuite, masterSecret, clientRandom, serverRandom, implementations): """ 
Create the keys for encryption and decryption in SSLv2 While we could reuse calcPendingStates(), we need to provide the key-arg data for the server that needs to be passed up to handshake protocol. """ if cipherSuite in CipherSuite.ssl2_128Key: key_length = 16 elif cipherSuite in CipherSuite.ssl2_192Key: key_length = 24 elif cipherSuite in CipherSuite.ssl2_64Key: key_length = 8 else: raise ValueError("Unknown cipher specified") key_material = bytearray(key_length * 2) md5_output_size = 16 for i, pos in enumerate(range(0, key_length * 2, md5_output_size)): key_material[pos:pos+md5_output_size] = MD5(\ masterSecret + bytearray(str(i), "ascii") + clientRandom + serverRandom) serverWriteKey = key_material[:key_length] clientWriteKey = key_material[key_length:] # specification draft says that DES key should not use the # incrementing label but all implementations use it anyway #elif cipherSuite in CipherSuite.ssl2_64Key: # key_material = MD5(masterSecret + clientRandom + serverRandom) # serverWriteKey = key_material[0:8] # clientWriteKey = key_material[8:16] # RC4
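# --- Illustrative sketch of the TLS 1.3 inner-plaintext de-padding shown in
# --- _tls13_de_pad() above (standalone, not the tlslite-ng method itself) ---
def tls13_de_pad(data):
    """Split a decrypted TLSInnerPlaintext into (content, content_type).

    The real content type is the last non-zero byte; everything after it is
    zero padding and everything before it is the record content (RFC 8446, 5.4).
    """
    for pos in range(len(data) - 1, -1, -1):
        if data[pos] != 0:
            return data[:pos], data[pos]
    raise ValueError("Malformed inner plaintext - content type missing")

# 23 is ContentType.application_data; payload and padding length are illustrative.
inner = bytearray(b"hello world") + bytearray([23]) + bytearray(4)
content, ctype = tls13_de_pad(inner)
assert content == bytearray(b"hello world") and ctype == 23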
import demistomock as demisto from CommonServerPython import * ''' IMPORTS ''' import requests import json import os from datetime import datetime, timedelta import collections # disable insecure warnings requests.packages.urllib3.disable_warnings() ''' GLOBAL VARS ''' SERVER = demisto.params().get('serverURL', '').strip('/') SERVER_URL = SERVER + '/api/v3' API_KEY = demisto.params()['APIKey'] USE_SSL = not demisto.params().get('insecure') DEFAULT_HEADERS = { 'Content-Type': 'application/json', 'Accept': 'application/json', 'Authorization': API_KEY } ''' HELPER FUNCTIONS ''' if not demisto.params()['proxy']: del os.environ['HTTP_PROXY'] del os.environ['HTTPS_PROXY'] del os.environ['http_proxy'] del os.environ['https_proxy'] def http_request(method, url_suffix, params_dict, headers, data=None): req_params = {} # type: Dict[Any,Any] if params_dict is not None: req_params.update(params_dict) url = SERVER_URL + url_suffix LOG('running {} request with url={}\tparams={}'.format(method, url, json.dumps(req_params))) try: res = requests.request(method, url, verify=USE_SSL, params=req_params, headers=headers, data=data ) res.raise_for_status() try: return res.json() except ValueError: # in case the response doesn't have JSON return "Request completed" except Exception, e: LOG(e) raise(e) def underscore_to_camelcase(word): return ' '.join(x.capitalize() or '_' for x in word.split('_')) def create_incident_data_from_alert(alert): alert.pop('comments') alert.pop('observations') return { 'name': 'Stealthwatch alert ' + str(alert.get('id', '')), 'rawJSON': json.dumps(alert), 'occurred': alert.get('created', '') } def get_latest_id(alerts_data): latest_id = 0 for alert in alerts_data: current_id = alert.get('id', None) if current_id is not None and current_id > latest_id: latest_id = current_id return latest_id ''' COMMANDS FUNCTIONS ''' def show_alert(alert_id): """ Returns alert by specific id """ api_endpoint = "/alerts/alert/{}/".format(alert_id) return http_request('GET', api_endpoint, {}, DEFAULT_HEADERS) def show_alert_command(): """ corresponds to 'sw-show-alert' command. Returns information about a specific alert """ alert_id = demisto.args().get('alertID') alert_data = show_alert(alert_id) if not demisto.args().get('addComments', False) == 'true': alert_data.pop('comments') alert_data.pop('new_comment') alert_data.pop('observations') list_for_md = ['resolved', 'id', 'last_modified', 'obj_created', 'assigned_to'] dict_for_md = {underscore_to_camelcase(k): v for k, v in alert_data.iteritems() if k in list_for_md} md = tableToMarkdown(alert_data.get('text', ''), dict_for_md) return { 'Type': entryTypes['note'], 'Contents': alert_data, 'ContentsFormat': formats['json'], 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': md, 'EntryContext': { "Stealthwatch.Alert(val.id==obj.id)": alert_data } } def update_alert(alert_id, params): """ Updates alert by specific id """ api_endpoint = "/alerts/alert/{}/".format(alert_id) return http_request('PUT', api_endpoint, params, DEFAULT_HEADERS) def update_alert_command(): """ corresponds to 'sw-update-alert' command. 
Returns information about a specific alert """ args = demisto.args() alert_id = args.get('alertID') update_params = {} # adding the possible params for update possible_params = ['new_comment', 'tags', 'publish_time', 'resolved', 'snooze_settings', 'merit', 'assigned_to'] for param in possible_params: current_param = args.get(param, False) if current_param: update_params[param] = current_param username = args.get('resolved_user', None) if username is not None: update_params['resolved_user'] = { 'username': username } alert_data = update_alert(alert_id, update_params) alert_data.pop('comments') alert_data.pop('new_comment') alert_data.pop('observations') list_for_md = ['resolved', 'id', 'last_modified', 'obj_created', 'assigned_to'] dict_for_md = {k: v for k, v in alert_data.iteritems() if k in list_for_md} md = tableToMarkdown(alert_data.get('text', ''), dict_for_md) return { 'Type': entryTypes['note'], 'Contents': alert_data, 'ContentsFormat': formats['json'], 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': md, 'EntryContext': { "Stealthwatch.Alert(val.id==obj.id)": alert_data } } def list_alerts(params): """ Retrieves alerts """ api_endpoint = "/alerts/alert/" return http_request('GET', api_endpoint, params, DEFAULT_HEADERS) def build_alert_dic(alert): dic = collections.OrderedDict() # type: Dict[str,str] list_for_md = ['id', 'last_modified', 'resolved', 'text', 'obj_created', 'assigned_to', 'description'] for item in list_for_md: dic[underscore_to_camelcase(item)] = alert[item] return dic def list_alerts_command(): """ corresponds to 'sw-list-alerts' command. Returns a list of Stealthwatch alerts """ args = demisto.args() list_params = {} # adding the possible params for update possible_params = ['status', 'tags', 'search', 'assignee', 'limit'] for param in possible_params: current_param = args.get(param, False) if current_param: list_params[param] = current_param alerts_data = list_alerts(list_params).get('objects') md_dicts_list = [] for alert in alerts_data: if not demisto.args().get('addComments', False) == 'true': alert.pop('comments') alert.pop('new_comment') alert.pop('observations') md_dicts_list.append(build_alert_dic(alert)) md = tableToMarkdown("The following alerts were retrieved", md_dicts_list) return { 'Type': entryTypes['note'], 'Contents': alerts_data, 'ContentsFormat': formats['json'], 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': md, 'EntryContext': { "Stealthwatch.Alert(val.id==obj.id)": alerts_data } } def domain_block(params): """ Updates domain blacklist status """ api_endpoint = "/blacklist/domains/" return http_request('POST', api_endpoint, {}, DEFAULT_HEADERS, params) def block_domain_command(): """ corresponds to 'sw-block-domain-or-ip' command. 
Adds a domain to the blacklist """ domain = demisto.args().get('domain') ip = demisto.args().get('ip') if not (domain or ip): return { "Type": entryTypes["error"], "ContentsFormat": formats["text"], "Contents": 'Please enter either domain or ip' } if domain and ip: return { "Type": entryTypes["error"], "ContentsFormat": formats["text"], "Contents": 'Please enter only domain or ip, not both' } identifier = None if domain: identifier = domain else: identifier = ip domain_params = { "identifier": identifier, "category": "domain", "list_on": "blacklist" } domain_result = domain_block(json.dumps(domain_params)) ec = None if domain: ec = { "Stealthwatch.Domain(val.identifier==obj.identifier)": domain_result } else: ec = { "Stealthwatch.IP(val.identifier==obj.identifier)": domain_result } return { 'Type': entryTypes['note'], 'Contents': domain_result, 'ContentsFormat': formats['json'], 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Blacklist ' + domain + ' result', domain_result), 'EntryContext': ec } def domain_unblock(domain_id): """ Removes domain from the blacklist """ api_endpoint = "/blacklist/domains/{}/".format(domain_id) return http_request('DELETE', api_endpoint, None, DEFAULT_HEADERS, None) def unblock_domain_command(): """ corresponds to 'sw-unblock-domain' command. Removes a domain to the blacklist """ domain_id = demisto.args().get('id') domain_result = domain_unblock(domain_id) return { 'Type': entryTypes['note'], 'Contents': domain_result, 'ContentsFormat': formats['json'], 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': 'Unblocked domain with id: ' + domain_id, } def list_domains(list_params): """ Lists blacklisted domains """ api_endpoint = "/blacklist/domains/" return http_request('GET', api_endpoint, list_params, DEFAULT_HEADERS, {}) def list_blocked_domains_command(): """ corresponds to 'sw-list-blocked-domains' command. Returns a list of the blocked domains """ args = demisto.args() list_params = {} # adding the possible params for update possible_params = ['search', 'limit'] for param in possible_params: current_param = args.get(param, False) if current_param: list_params[param] = current_param specific_domain = args.get('domain', None) if specific_domain is not None: list_params['identifier'] = specific_domain domains_data = list_domains(list_params) domains_result = domains_data.get('objects', {}) data_output = [] for obs in domains_result: data_output.append({underscore_to_camelcase(k): v for k, v in obs.items()}) return { 'Type': entryTypes['note'], 'Contents': domains_data, 'ContentsFormat': formats['json'], 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Current blacklisted domains are', data_output), 'EntryContext': { "Stealthwatch.Domain(val.identifier==obj.identifier)": domains_result } } def list_observations(params): """ Lists observations """ api_endpoint = "/observations/all/" return http_request('GET', api_endpoint, params, DEFAULT_HEADERS) def list_observations_command(): """ corresponds to 'sw-list-observations' command. 
Returns a list of Stealthwatch observations """ args = demisto.args() list_params = { "ordering": 'creation_time' } # adding the possible params for update possible_params = ['alert', 'id', 'search', 'limit'] for param in possible_params: current_param = args.get(param, False) if current_param: list_params[param] = current_param observations_data = list_observations(list_params).get('objects') data_output = [] for obs in observations_data: data_output.append({underscore_to_camelcase(k): v for k, v in obs.items()}) return { 'Type': entryTypes['note'], 'Contents': data_output, 'ContentsFormat': formats['json'], 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Found the following observations', data_output), 'EntryContext': { "Stealthwatch.Observation(val.id==obj.id)": observations_data } } def list_sessions(params): """ Lists observations """ api_endpoint = "/snapshots/session-data/" return http_request('GET', api_endpoint, params, DEFAULT_HEADERS) def list_sessions_command(): """ corresponds to 'sw-list-sessions' command. Returns a list of Stealthwatch sessions """ date_format = "%Y-%m-%dT%H:%M:%SZ" list_params = {} ip = demisto.args().get('ip') connected_ip = demisto.args().get('connectedIP') connected_device_id = demisto.args().get('connectedDeviceId') limit = demisto.args().get('limit') start_time = demisto.args().get('startTime', None) end_time = demisto.args().get('endTime', None) session_type = demisto.args().get('sessionType', 'all') if start_time and end_time: list_params['start_datetime'] = start_time list_params['end_datetime'] = end_time elif end_time is None: start_time_object = datetime.strptime(start_time, date_format) start_time_object = start_time_object - timedelta(minutes=5) end_time_object = start_time_object + timedelta(minutes=5) start_time = start_time_object.strftime(date_format) end_time = end_time_object.strftime(date_format) list_params['ip'] = ip list_params['connected_ip'] = connected_ip list_params['limit'] = limit list_params['start_datetime'] = start_time list_params['end_datetime'] = end_time list_params['connected_device_id'] = connected_device_id unique_session_ids = [] # type: List[str] final_sessions_data = [] sessions_data = list_sessions(list_params).get('objects') for sess in sessions_data: if sess['connected_ip'] not in unique_session_ids: unique_session_ids.append(sess['connected_ip']) if demisto.get(sess, 'connected_device_id'): sess['connected_device_is_external'] = False if session_type == 'internal': final_sessions_data.append(sess) else: sess['connected_device_is_external'] = True if session_type == 'external': final_sessions_data.append(sess) if session_type == 'all': final_sessions_data.append(sess) data_output = [] for sess in final_sessions_data: data_output.append({underscore_to_camelcase(k): v for k, v in sess.items()}) return { 'Type': entryTypes['note'], 'Contents': data_output, 'ContentsFormat': formats['json'], 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Found the following session data', data_output), 'EntryContext': { "Stealthwatch.Session(val.id==obj.id)": final_sessions_data } } def fetch_incidents(): date_format = "%Y-%m-%dT%H:%M:%SZ" list_params = { "ordering": 'created', "limit": 100 } final_alerts = [] last_fetch_string = demisto.getLastRun().get('last_fetch_time', None) ids = demisto.getLastRun().get('ids', None) first_time = (not last_fetch_string and ids is not None) if last_fetch_string is None or not last_fetch_string: now = datetime.now() last_fetch = 
now - timedelta(days=20) else: last_fetch = parse_date_string(last_fetch_string) # Couldn't find a way to sort descending so looking for last offset of 100 alerts alerts_response = list_alerts(list_params) num_alerts = alerts_response.get('meta', {'total_count': 100}).get('total_count') offset = 0 if num_alerts < 100 else num_alerts - 100 list_params['offset'] = offset alerts_response = list_alerts(list_params) alerts_data = alerts_response.get('objects', []) max_fetch_time = last_fetch_string if last_fetch_string else now.strftime(date_format) for alert in alerts_data: created = alert.get('created') if parse_date_string(created) > last_fetch: incident_from_alert = create_incident_data_from_alert(alert) if first_time: if alert.get('id') not in ids: final_alerts.append(incident_from_alert) else: final_alerts.append(incident_from_alert) if parse_date_string(created) > parse_date_string(max_fetch_time): max_fetch_time = created demisto.setLastRun({ 'last_fetch_time': max_fetch_time }) demisto.incidents(final_alerts) ''' EXECUTION CODE ''' try: if demisto.command() == 'test-module': # This is the call made when pressing the integration
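# The block-domain command earlier in this integration accepts either a 'domain' or an 'ip'
# argument but not both, yet it builds the human-readable title from 'domain' even when only
# 'ip' was supplied (which would raise a TypeError on the string concatenation). The helper
# below is a minimal, self-contained sketch of that validation pattern; 'args' and the
# ValueError messages are illustrative assumptions, not the integration's actual helpers.
def resolve_identifier(args):
    """Return (identifier, kind) for exactly one of 'domain' / 'ip', else raise ValueError."""
    domain = args.get('domain')
    ip = args.get('ip')
    if not (domain or ip):
        raise ValueError('Please enter either domain or ip')
    if domain and ip:
        raise ValueError('Please enter only domain or ip, not both')
    # Remember which argument was supplied so the entry-context key and the markdown
    # title can both be built from it safely.
    return (domain, 'domain') if domain else (ip, 'ip')

if __name__ == '__main__':
    print(resolve_identifier({'domain': 'example.com'}))  # ('example.com', 'domain')
    print(resolve_identifier({'ip': '10.0.0.1'}))         # ('10.0.0.1', 'ip')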
[altchars]" in output assert ">>> base64.b64encode(s, altchars=None)" in output assert "$ py base64.b64encode s [altchars]" in output assert "$ py base64.b64encode --s=... [--altchars=...]" in output assert "--version" not in output assert "[PYFLYBY] import base64" in output if PY2: assert "\n Encode a string using Base64." in output else: assert "\n Encode the bytes-like object s using Base64 and return a bytes object." in output assert "binascii.b2a_base64" not in output def test_function_help_autoimport_1(): output = py("b64encode", "--help") assert "[PYFLYBY] from base64 import b64encode" in output assert "s [altchars]" in output assert "$ py b64encode s [altchars]" in output assert "binascii.b2a_base64" not in output def test_function_help_expression_1(): output = py("sys.stdout.write", "--help") assert '>>> sys.stdout.write(' in output assert "$ py sys.stdout.write " in output if PY2: assert "Write string str to file." in output, output else: assert "Write string to stream." in output, output def test_function_help_quote_need_parens_1(): output = py('lambda a,b: a*b', '-h') assert ">>> (lambda a,b: a*b)(a, b)" in output assert "$ py 'lambda a,b: a*b' a b" in output def test_function_help_quote_already_have_parens_1(): output = py('(lambda a,b: a*b)', '-h') assert ">>> (lambda a,b: a*b)(a, b)" in output assert "$ py '(lambda a,b: a*b)' a b" in output def test_function_help_quote_nested_lambdas_1(): output = py('(lambda a,b: lambda c,d: a*b*c*d)(2,3)', '-h') assert ">>> (lambda a,b: lambda c,d: a*b*c*d)(2,3)(c, d)" in output assert "$ py '(lambda a,b: lambda c,d: a*b*c*d)(2,3)' c d" in output assert "$ py '(lambda a,b: lambda c,d: a*b*c*d)(2,3)' --c=... --d=..." in output def test_help_class_init_1(tmp): writetext(tmp.dir/"f80166304.py", """ class Greenpoint(object): def __init__(self66770013, Milton, * Noble, ** India): pass """) writetext(tmp.dir/"p", """ from f80166304 import Greenpoint """) output = py("Greenpoint?", PYTHONPATH=tmp.dir, PYFLYBY_PATH=tmp.dir/"p") assert ">>> Greenpoint(Milton, *Noble, **India)" in output def test_help_class_init_oldclass_1(tmp): writetext(tmp.dir/"f20579393.py", """ class Williamsburg: def __init__(self42828936, Metropolitan, * Morgan, ** Bogart): pass """) writetext(tmp.dir/"p", """ from f20579393 import Williamsburg """) output = py("Williamsburg??", PYTHONPATH=tmp.dir, PYFLYBY_PATH=tmp.dir/"p") assert ">>> Williamsburg(Metropolitan, *Morgan, **Bogart)" in output def test_help_class_new_1(tmp): writetext(tmp.dir/"f56365338.py", """ class Knickerbocker(object): def __init__(cls15515092, Wilson, * Madison, ** Linden): pass """) writetext(tmp.dir/"p", """ from f56365338 import Knickerbocker """) output = py("?Knickerbocker", PYTHONPATH=tmp.dir, PYFLYBY_PATH=tmp.dir/"p") assert ">>> Knickerbocker(Wilson, *Madison, **Linden)" in output @pytest.mark.parametrize("cmdline", [ "b64encode --help", "b64encode -help", "b64encode --h", "b64encode -h", "b64encode --?", "b64encode -?", "b64encode ?", "--help b64encode", "-help b64encode", "help b64encode", "--h b64encode", "-h b64encode", "--? b64encode", "-? b64encode", "? b64encode", "b64encode?", "?b64encode", "base64.b64encode --help", "base64.b64encode -help", "base64.b64encode --h", "base64.b64encode -h", "base64.b64encode --?", "base64.b64encode -?", "base64.b64encode ?", "--help base64.b64encode", "-help base64.b64encode", "help base64.b64encode", "--? base64.b64encode", "-? base64.b64encode", "? 
base64.b64encode", "base64.b64encode?", "?base64.b64encode", ]) def test_function_help_variants_1(cmdline): output = py(cmdline.split()) assert "s [altchars]" in output assert "--version" not in output assert "binascii.b2a_base64" not in output def test_function_source_1(): output = py("base64.b64encode", "--source") assert "[PYFLYBY] import base64" in output assert ">>> base64.b64encode(s, altchars=None)" in output assert "$ py base64.b64encode s [altchars]" in output assert "$ py base64.b64encode --s=... [--altchars=...]" in output assert "binascii.b2a_base64" in output # from source code if PY2: assert output.count("Encode a string using Base64") == 1, output else: assert output.count("Encode the bytes-like object s using Base64 and return a bytes object.") == 1 assert "--version" not in output def test_function_source_autoimport_1(): output = py("b64encode", "--source") assert "[PYFLYBY] from base64 import b64encode" in output assert ">>> b64encode(s, altchars=None)" in output assert "$ py b64encode s [altchars]" in output assert "$ py b64encode --s=... [--altchars=...]" in output assert "binascii.b2a_base64" in output # from source code if PY2: assert output.count("Encode a string using Base64") == 1, output else: assert output.count("Encode the bytes-like object s using Base64 and return a bytes object.") == 1 @pytest.mark.parametrize("cmdline", [ "b64encode --source", "b64encode -source", "b64encode --??", "b64encode -??", "b64encode ??", "--source b64encode", "-source b64encode", "source b64encode", "--?? b64encode", "-?? b64encode", "?? b64encode", "b64encode??", "??b64encode", "base64.b64encode --source", "base64.b64encode -source", "base64.b64encode --??", "base64.b64encode -??", "base64.b64encode ??", "--source base64.b64encode", "-source base64.b64encode", "source base64.b64encode", "--?? base64.b64encode", "-?? base64.b64encode", "?? base64.b64encode", "base64.b64encode??", "??base64.b64encode" ]) def test_function_source_variants_1(cmdline): output = py(cmdline.split()) assert "s [altchars]" in output assert "binascii.b2a_base64" in output def test_module_help_1(): output = py("base64?") assert "RFC 3548" in output assert "import binascii" not in output @pytest.mark.parametrize("args", [ "base64 --help", "base64 -help", "base64 --h", "base64 -h", "base64 --?", "base64 -?", "base64 ?", "--help base64", "-help base64", "help base64", "--h base64", "-h base64", "--? base64", "-? base64", "? base64", "base64?", "?base64", ]) def test_module_help_variants_1(args): output = py(args.split()) assert "RFC 3548" in output, output assert "import binascii" not in output def test_module_source_1(): output = py("base64??") assert "RFC 3548" in output assert "import binascii" in output def test_module_no_help_1(): output, retcode = py("-m", "base64", "--help") assert retcode == 2 assert "option --help not recognized" in output assert "RFC 3548" not in output @pytest.mark.parametrize("args", [ "base64 --source", "base64 -source", "base64 --??", "base64 -??", "base64 ??", "--source base64", "-source base64", "source base64", "--?? base64", "-?? base64", "?? 
base64", "base64??", "??base64", ]) def test_module_source_variants_1(args): output = py(args.split()) assert "RFC 3548" in output assert "import binascii" in output @pytest.mark.parametrize("args", [ "--help=3", "-help=3", "--help 3", "-help 3", "--hel=3", "-hel=3", "--hel 3", "-hel 3", "--he=3", "-he=3", "--he 3", "-he 3", "--h=3", "-h=3", "--h 3", "-h 3", ]) def test_function_arg_help_1(args): result = py('lambda help: help*4', *(args.split())) expected = dedent(""" [PYFLYBY] lambda help: help*4 [PYFLYBY] (lambda help: help*4)(3) 12 """).strip() assert expected == result @pytest.mark.parametrize("args", [ "--hello=3", "-hello=3", "--hello 3", "-hello 3", "--hel=3", "-hel=3", "--hel 3", "-hel 3", "--he=3", "-he=3", "--he 3", "-he 3", "--h=3", "-h=3", "--h 3", "-h 3", ]) def test_function_arg_hello_1(args): result = py('lambda hello: hello*7', *(args.split())) expected = dedent(""" [PYFLYBY] lambda hello: hello*7 [PYFLYBY] (lambda hello: hello*7)(3) 21 """).strip() assert expected == result def test_function_arg_help_qmark_1(): output = py('lambda help: help*4', '-?') assert "$ py 'lambda help: help*4' help" in output def test_function_arg_help_help_1(): output, retcode = py('lambda help: help*4', '--help') assert retcode == 1 assert "Missing argument to --help" in output def test_function_arg_help_h_1(): output, retcode = py('lambda help: help*4', '-h') assert retcode == 1 assert "Missing argument to -h" in output def test_object_method_help_1(): output = py('email.message.Message().get', '--help') assert "$ py 'email.message.Message().get' name [failobj]" in output assert "Get a header value." in output @pytest.mark.parametrize("args", [ "email.message.Message().get --help", "email.message.Message().get -help", "email.message.Message().get --h", "email.message.Message().get -h", "email.message.Message().get --?", "email.message.Message().get -?", "email.message.Message().get ?", "email.message.Message().get?", "--help email.message.Message().get", "-help email.message.Message().get", "help email.message.Message().get", "--h email.message.Message().get", "-h email.message.Message().get", "--? email.message.Message().get", "-? email.message.Message().get", "? email.message.Message().get", "?email.message.Message().get", ]) def test_object_method_help_variants_1(args): output = py(args.split()) assert "$ py 'email.message.Message().get' name [failobj]" in output assert "Get a header value." in output def test_object_method_source_1(): output = py('email.message.Message().get', '--source') assert "$ py 'email.message.Message().get' name [failobj]" in output assert "Get a header value." in output assert "name.lower()" in output @pytest.mark.parametrize("args", [ "email.message.Message().get --source", "email.message.Message().get -source", "email.message.Message().get --??", "email.message.Message().get -??", "email.message.Message().get ??", "email.message.Message().get??", "--source email.message.Message().get", "-source email.message.Message().get", "source email.message.Message().get", "--?? email.message.Message().get", "-?? email.message.Message().get", "?? email.message.Message().get", "??email.message.Message().get", ]) def test_object_method_source_variants1(args): output = py(args.split()) assert "$ py 'email.message.Message().get' name [failobj]" in output assert "Get a header value." 
in output assert "name.lower()" in output def test_arg_nodashdash_1(): result = py('print', '42.0000', 'sys') expected = dedent(""" [PYFLYBY] import sys [PYFLYBY] print(42.0, <module 'sys' (built-in)>) 42.0 <module 'sys' (built-in)> """).strip() assert expected == result def test_arg_dashdash_1(): result = py('print', '--', '42.0000', 'sys') expected = dedent(""" [PYFLYBY] print('42.0000', 'sys') 42.0000 sys """).strip() assert expected == result def test_arg_dashdash_2(): result = py('print', '42.0000', '--', 'sys') expected = dedent(""" [PYFLYBY] print(42.0, 'sys') 42.0 sys """).strip() assert expected == result def test_arg_dashdash_3(): result = py('print', '42.0000', 'sys', '--') expected = dedent(""" [PYFLYBY] import sys [PYFLYBY] print(42.0, <module 'sys' (built-in)>) 42.0 <module 'sys' (built-in)> """).strip() assert expected == result def test_arg_dashdash_4(): result = py('print', '42.0000', 'sys', '--', '--') expected = dedent(""" [PYFLYBY] import sys [PYFLYBY] print(42.0, <module 'sys' (built-in)>, '--') 42.0 <module 'sys' (built-in)> -- """).strip() assert expected == result def test_arg_dashdash_help_1(): result = py('print', '--', '--help') expected = dedent(""" [PYFLYBY] print('--help') --help """).strip() assert expected == result def test_arg_dashdash_dashdash_1(): result = py('print', '--', '--', '42.000') expected = dedent(""" [PYFLYBY] print('--', '42.000') -- 42.000 """).strip() assert expected == result def test_kwargs_no_dashdash_1(): result = py("lambda *a,**k: (a,k)", "3.500", "--foo", "7.500") expected = dedent(""" [PYFLYBY] lambda *a,**k: (a,k) [PYFLYBY] (lambda *a,**k: (a,k))(3.5, foo=7.5) ((3.5,), {'foo': 7.5}) """).strip() assert expected == result, result def test_kwargs_dashdash_1(): result = py("lambda *a,**k: (a,k)", "--", "3.500", "--foo", "7.500") expected = dedent(""" [PYFLYBY] lambda *a,**k: (a,k) [PYFLYBY] (lambda *a,**k: (a,k))('3.500', '--foo', '7.500') (('3.500', '--foo', '7.500'), {}) """).strip() assert expected == result def test_joinstr_1(): result = py("3", "+", "5") expected
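# The test cases above exercise how the pyflyby 'py' CLI treats a bare '--' argument:
# arguments before it may be auto-evaluated (numbers become floats, importable names become
# modules), while everything after it is passed through as literal strings. The helper below
# is a simplified illustration of that splitting rule written for this document, not
# pyflyby's real argument parser.
def split_on_dashdash(argv):
    """Split argv into (auto_evaluated, literal) halves around the first '--'."""
    if '--' in argv:
        i = argv.index('--')
        return argv[:i], argv[i + 1:]
    return argv, []

if __name__ == '__main__':
    print(split_on_dashdash(['42.0000', 'sys']))        # (['42.0000', 'sys'], [])
    print(split_on_dashdash(['--', '42.0000', 'sys']))  # ([], ['42.0000', 'sys'])
    print(split_on_dashdash(['42.0000', '--', 'sys']))  # (['42.0000'], ['sys'])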
# coding: utf-8 from __future__ import unicode_literals, division, absolute_import, print_function import sys import threading import unittest from os import path import time import re import shutil import os import sublime import shellenv import package_events if sys.version_info < (3,): from Queue import Queue else: from queue import Queue from .mocks import GolangBuildMock TEST_GOPATH = path.join(path.dirname(__file__), 'go_projects') TEST_GOPATH2 = path.join(path.dirname(__file__), 'go_projects2') VIEW_SETTINGS = { 'GOPATH': TEST_GOPATH, 'GOOS': None, 'GOARCH': None, 'GOARM': None, 'GO386': None, 'GORACE': None } CROSS_COMPILE_OS = 'darwin' if sys.platform != 'darwin' else 'linux' class GolangBuildTests(unittest.TestCase): def setUp(self): skip_entries = {} skip_entries[TEST_GOPATH] = set(['.git-keep', 'good', 'bad', 'runnable']) skip_entries[TEST_GOPATH2] = set(['.git-keep', 'runnable2']) for gopath in (TEST_GOPATH, TEST_GOPATH2): for subdir in ('pkg', 'bin', 'src'): full_path = path.join(gopath, subdir) for entry in os.listdir(full_path): if entry in skip_entries[gopath]: continue entry_path = path.join(full_path, entry) if path.isdir(entry_path): shutil.rmtree(entry_path) else: os.remove(entry_path) def test_build(self): ensure_not_ui_thread() file_path = path.join(TEST_GOPATH, 'src', 'good', 'rune_len.go') def _run_build(view, result_queue): view.window().run_command('golang_build') result_queue = open_file(file_path, VIEW_SETTINGS, _run_build) result = wait_build(result_queue) self.assertEqual('success', result) self.assertTrue(confirm_user('Did "go build" succeed?')) def test_build_flags(self): ensure_not_ui_thread() file_path = path.join(TEST_GOPATH, 'src', 'good', 'rune_len.go') def _run_build(view, result_queue): view.window().run_command('golang_build', {'flags': ['-v', '-x']}) result_queue = open_file(file_path, VIEW_SETTINGS, _run_build) result = wait_build(result_queue) self.assertEqual('success', result) self.assertTrue(confirm_user('Did "go build" succeed and print all commands?')) def test_build_flags_from_settings(self): ensure_not_ui_thread() file_path = path.join(TEST_GOPATH, 'src', 'good', 'rune_len.go') with GolangBuildMock(sublime_settings={'build:flags': ['-v', '-x']}): def _run_build(view, result_queue): view.window().run_command('golang_build') result_queue = open_file(file_path, VIEW_SETTINGS, _run_build) result = wait_build(result_queue) self.assertEqual('success', result) self.assertTrue(confirm_user('Did "go build" succeed and print all commands?')) def test_install_flags_from_view_settings(self): ensure_not_ui_thread() file_path = path.join(TEST_GOPATH, 'src', 'good', 'rune_len.go') def _run_build(view, result_queue): view.window().run_command('golang_build', {'task': 'install'}) custom_view_settings = VIEW_SETTINGS.copy() custom_view_settings['install:flags'] = ['-v', '-x'] result_queue = open_file(file_path, custom_view_settings, _run_build) result = wait_build(result_queue) self.assertEqual('success', result) self.assertTrue(confirm_user('Did "go install" succeed and print all commands?')) def test_clean(self): ensure_not_ui_thread() file_path = path.join(TEST_GOPATH, 'src', 'good', 'rune_len.go') def _run_build(view, result_queue): view.window().run_command('golang_build', {'task': 'clean'}) result_queue = open_file(file_path, VIEW_SETTINGS, _run_build) result = wait_build(result_queue) self.assertEqual('success', result) self.assertTrue(confirm_user('Did "go clean" succeed?')) def test_test(self): ensure_not_ui_thread() file_path = 
path.join(TEST_GOPATH, 'src', 'good', 'rune_len.go') def _run_build(view, result_queue): view.window().run_command('golang_build', {'task': 'test'}) result_queue = open_file(file_path, VIEW_SETTINGS, _run_build) result = wait_build(result_queue) self.assertEqual('success', result) self.assertTrue(confirm_user('Did "go test" succeed?')) def test_run(self): ensure_not_ui_thread() file_path = path.join(TEST_GOPATH, 'src', 'runnable', 'main.go') def _run_build(view, result_queue): view.window().run_command('golang_build', {'task': 'run'}) result_queue = open_file(file_path, VIEW_SETTINGS, _run_build) result = wait_build(result_queue) self.assertEqual('success', result) self.assertTrue(confirm_user('Did "go run" succeed?')) def test_run_with_file_path_flag_absolute(self): ensure_not_ui_thread() file_path = path.join(TEST_GOPATH, 'src', 'good', 'rune_len.go') def _run_build(view, result_queue): view.window().run_command('golang_build', {'task': 'run'}) custom_view_settings = VIEW_SETTINGS.copy() custom_view_settings['run:flags'] = [os.path.join(TEST_GOPATH, 'src', 'runnable', 'main.go')] result_queue = open_file(file_path, custom_view_settings, _run_build) result = wait_build(result_queue) self.assertEqual('success', result) self.assertTrue(confirm_user('Did "go run" succeed for runnable/main.go?')) def test_run_with_file_path_flag_relative(self): ensure_not_ui_thread() file_path = path.join(TEST_GOPATH, 'src', 'good', 'rune_len.go') def _run_build(view, result_queue): view.window().run_command('golang_build', {'task': 'run'}) custom_view_settings = VIEW_SETTINGS.copy() custom_view_settings['run:flags'] = ['runnable/main.go'] result_queue = open_file(file_path, custom_view_settings, _run_build) result = wait_build(result_queue) self.assertEqual('success', result) self.assertTrue(confirm_user('Did "go run" succeed for runnable/main.go?')) def test_run_with_file_path_flag_relative_multiple_gopath(self): ensure_not_ui_thread() file_path = path.join(TEST_GOPATH, 'src', 'good', 'rune_len.go') def _run_build(view, result_queue): view.window().run_command('golang_build', {'task': 'run'}) custom_view_settings = VIEW_SETTINGS.copy() custom_view_settings['GOPATH'] = os.pathsep.join([TEST_GOPATH, TEST_GOPATH2]) custom_view_settings['run:flags'] = ['runnable2/main.go'] result_queue = open_file(file_path, custom_view_settings, _run_build) result = wait_build(result_queue) self.assertEqual('success', result) self.assertTrue(confirm_user('Did "go run" succeed for runnable2/main.go?')) def test_install(self): ensure_not_ui_thread() file_path = path.join(TEST_GOPATH, 'src', 'good', 'rune_len.go') def _run_build(view, result_queue): view.window().run_command('golang_build', {'task': 'install'}) result_queue = open_file(file_path, VIEW_SETTINGS, _run_build) result = wait_build(result_queue) self.assertEqual('success', result) self.assertTrue(confirm_user('Did "go install" succeed?')) def test_cross_compile(self): ensure_not_ui_thread() file_path = path.join(TEST_GOPATH, 'src', 'good', 'rune_len.go') begin_event = threading.Event() def _run_build(view, result_queue): notify_user('Select %s/amd64 from quick panel' % CROSS_COMPILE_OS) begin_event.set() view.window().run_command('golang_build', {'task': 'cross_compile'}) result_queue = open_file(file_path, VIEW_SETTINGS, _run_build) begin_event.wait() result = wait_build(result_queue, timeout=15) self.assertEqual('success', result) self.assertTrue(confirm_user('Did the cross-compile succeed?')) def test_get(self): ensure_not_ui_thread() file_path = 
path.join(TEST_GOPATH, 'src', 'good', 'rune_len.go') begin_event = threading.Event() def _run_build(view, result_queue): sublime.set_clipboard('github.com/golang/example/hello') notify_user('Paste from the clipboard into the input panel') begin_event.set() view.window().run_command('golang_build_get') result_queue = open_file(file_path, VIEW_SETTINGS, _run_build) begin_event.wait() result = wait_build(result_queue) self.assertEqual('success', result) self.assertTrue(confirm_user('Did "go get" succeed?')) def test_get_flags(self): ensure_not_ui_thread() file_path = path.join(TEST_GOPATH, 'src', 'good', 'rune_len.go') begin_event = threading.Event() def _run_build(view, result_queue): sublime.set_clipboard('github.com/golang/example/hello') notify_user('Paste from the clipboard into the input panel') begin_event.set() view.window().run_command('golang_build_get', {'flags': ['-v', '-d']}) result_queue = open_file(file_path, VIEW_SETTINGS, _run_build) begin_event.wait() result = wait_build(result_queue) self.assertEqual('success', result) self.assertTrue(confirm_user('Did "go get" download but not install?')) def test_get_flags_from_settings(self): ensure_not_ui_thread() file_path = path.join(TEST_GOPATH, 'src', 'good', 'rune_len.go') with GolangBuildMock(sublime_settings={'get:flags': ['-v', '-d']}): def _run_build(view, result_queue): view.window().run_command('golang_build_get', {'url': 'github.com/golang/example/hello'}) result_queue = open_file(file_path, VIEW_SETTINGS, _run_build) result = wait_build(result_queue) self.assertEqual('success', result) self.assertTrue(confirm_user('Did "go get" download but not install?')) def test_get_url(self): ensure_not_ui_thread() file_path = path.join(TEST_GOPATH, 'src', 'good', 'rune_len.go') def _run_build(view, result_queue): view.window().run_command('golang_build_get', {'url': 'github.com/golang/example/hello'}) result_queue = open_file(file_path, VIEW_SETTINGS, _run_build) result = wait_build(result_queue) self.assertEqual('success', result) self.assertTrue(confirm_user('Did "go get" succeed for "github.com/golang/example/hello"?')) def test_terminal(self): ensure_not_ui_thread() file_path = path.join(TEST_GOPATH, 'src', 'good', 'rune_len.go') def _run_build(view, result_queue): view.window().run_command('golang_build_terminal') open_file(file_path, VIEW_SETTINGS, _run_build) self.assertTrue(confirm_user('Did a terminal open to Packages/Golang Build/dev/go_projects/src/good/?')) def test_build_bad(self): ensure_not_ui_thread() file_path = path.join(TEST_GOPATH, 'src', 'bad', 'hello.go') def _run_build(view, result_queue): view.window().run_command('golang_build') result_queue = open_file(file_path, VIEW_SETTINGS, _run_build) result = wait_build(result_queue) self.assertEqual('error', result) self.assertTrue(confirm_user('Did "go build" fail?')) def test_build_cancel(self): ensure_not_ui_thread() file_path = path.join(TEST_GOPATH, 'src', 'good', 'rune_len.go') def _run_build(view, result_queue): view.window().run_command('golang_build') def _cancel_build(): view.window().run_command('golang_build_cancel') sublime.set_timeout(_cancel_build, 50) # We perform a cross-compile so the user has time to interrupt the build custom_view_settings = VIEW_SETTINGS.copy() custom_view_settings['GOOS'] = CROSS_COMPILE_OS custom_view_settings['GOARCH'] = 'amd64' result_queue = open_file(file_path, custom_view_settings, _run_build) result = wait_build(result_queue) self.assertEqual('cancelled', result) self.assertTrue(confirm_user('Was "go build" successfully 
cancelled?')) def test_build_reopen(self): ensure_not_ui_thread() file_path = path.join(TEST_GOPATH, 'src', 'good', 'rune_len.go') def _run_build(view, result_queue): view.window().run_command('golang_build') result_queue = open_file(file_path, VIEW_SETTINGS, _run_build) result = wait_build(result_queue) self.assertEqual('success', result) time.sleep(0.4) def _hide_panel(): sublime.active_window().run_command('hide_panel') sublime.set_timeout(_hide_panel, 1) time.sleep(0.4) self.assertTrue(confirm_user('Was the build output hidden?')) def _reopen_panel(): sublime.active_window().run_command('golang_build_reopen') sublime.set_timeout(_reopen_panel, 1) time.sleep(0.4) self.assertTrue(confirm_user('Was the build output reopened?')) def test_build_interrupt(self): ensure_not_ui_thread() file_path = path.join(TEST_GOPATH, 'src', 'good', 'rune_len.go') begin_event = threading.Event() second_begin_event = threading.Event() def _run_build(view, result_queue): notify_user('Press the "Stop Running Build" button when prompted') begin_event.set() view.window().run_command('golang_build') def _new_build(): view.window().run_command('golang_build') second_begin_event.set() sublime.set_timeout(_new_build, 50) # We perform a cross-compile so the user has time to interrupt the build custom_view_settings = VIEW_SETTINGS.copy() custom_view_settings['GOOS'] = CROSS_COMPILE_OS custom_view_settings['GOARCH'] = 'amd64' result_queue = open_file(file_path, custom_view_settings, _run_build) begin_event.wait() result1 = wait_build(result_queue) self.assertEqual('cancelled', result1) second_begin_event.wait() result2 = wait_build(result_queue) self.assertEqual('success', result2) self.assertTrue(confirm_user('Was the first build cancelled and the second successful?')) def test_build_go_missing(self): ensure_not_ui_thread() shell, _ = shellenv.get_env() search_path = path.expanduser('~') with GolangBuildMock(shell=shell, env={'PATH': search_path}): file_path = path.join(TEST_GOPATH, 'src', 'good', 'rune_len.go') def _run_build(view, result_queue): notify_user('Press the "Open Documentation" button when prompted about go not being found in the PATH') view.window().run_command('golang_build') open_file(file_path, VIEW_SETTINGS, _run_build) time.sleep(0.5) self.assertTrue(confirm_user('Were you prompted that go could not be found in the PATH?')) self.assertTrue(confirm_user('When you pressed "Open Documentation", was it opened in your browser?')) def test_build_no_gopath(self): ensure_not_ui_thread() shell, env = shellenv.get_env() if 'GOPATH' in env: del env['GOPATH'] with GolangBuildMock(shell=shell, env=env): file_path = path.join(TEST_GOPATH, 'src', 'good', 'rune_len.go') def _run_build(view, result_queue): notify_user('Press the "Open Documentation" button when prompted about GOPATH not being set') view.window().run_command('golang_build') custom_view_settings = VIEW_SETTINGS.copy() del custom_view_settings['GOPATH'] open_file(file_path, custom_view_settings, _run_build) time.sleep(0.5) self.assertTrue(confirm_user('Were you prompted that GOPATH was not set?')) self.assertTrue(confirm_user('When you pressed "Open Documentation", was it opened in your browser?')) def ensure_not_ui_thread(): """ The tests won't function properly if they are run in the UI thread, so this functions throws an exception if that is attempted """ if isinstance(threading.current_thread(), threading._MainThread): raise RuntimeError('Tests can not be run in the UI thread') def open_file(file_path, view_settings, callback): """ Open a file in 
Sublime Text, sets settings on the view and then executes the callback once the file is opened :param file_path: A unicode string of the path to the file to open :param view_settings: A dict of settings to set the "golang" key of the view's settings to :param callback: The callback to execute in the UI thread once the file is opened """ result_queue = Queue() file_param = file_path if sys.platform == 'win32': file_param = re.sub('^([a-zA-Z]):', '/\\1', file_param) file_param = file_param.replace('\\', '/') def open_file_callback(): window = sublime.active_window() window.run_command( 'open_file', { 'file': file_param } ) when_file_opened(window, file_path, view_settings, callback, result_queue) sublime.set_timeout(open_file_callback, 50) return result_queue def when_file_opened(window, file_path, view_settings, callback, result_queue): """ Periodic polling callback used by open_file() to find the newly-opened file :param window: The sublime.Window to look for the view in :param file_path: The file path of the file that was opened :param view_settings: A dict of settings to set to the view's "golang" setting key :param callback: The callback to execute when the file is opened :param result_queue: A Queue() object the callback can use to communicate with the test """ view = window.active_view() if view and
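# The Golang Build tests above repeatedly call wait_build(result_queue), which is not shown
# in this excerpt. A plausible minimal implementation, sketched here purely as an assumption,
# simply blocks on the Queue returned by open_file() until the build callback pushes its
# 'success' / 'error' / 'cancelled' result, or a timeout elapses.
import queue

def wait_build(result_queue, timeout=10):
    """Block until the build reports a result string, or fail the test on timeout."""
    try:
        return result_queue.get(timeout=timeout)
    except queue.Empty:
        raise AssertionError('Build did not report a result within %d seconds' % timeout)

if __name__ == '__main__':
    q = queue.Queue()
    q.put('success')
    print(wait_build(q, timeout=1))  # 'success'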
((cmd != 'MEM=CLEAR' and echo_time >= self.timeout) or (cmd == 'MEM=CLEAR' and echo_time >= self.mem_clear_timeout)): # The command timed out reading back the echo of the command. # No need to read the values as it will also time out. # Log it and retry. In practice, the retry always works. log.info("%s: times: %f %f %f -retrying-" % (cmd, flush_time, cmd_time, echo_time)) log.info('%s: Reading cmd echo timed out (%f seconds), retrying.' % (cmd, echo_time)) # Retrying setting the time must be special cased as now a little # more than one second has passed. As such, redo the command # with the current time. if cmd.startswith("TIME=") and cmd != "TIME=?": cmd = self._compose_set_time_command() # Retry else: # Success, the reading of the echoed command did not time out. break if data != cmd and attempts > 1: # After retrying, the cmd always echoes back as an empty string. if data == '': log.info("%s: Accepting empty string as cmd echo." % cmd) else: raise weewx.WeeWxIOError( "command: Command failed: cmd='%s' reply='%s'" % (cmd, data)) t5 = time.time() retval = self.read() t6 = time.time() value_time = t6 - t5 if cmd == 'MEM=CLEAR': log.info("%s: times: %f %f %f %f" % (cmd, flush_time, cmd_time, echo_time, value_time)) if attempts > 1: if retval != '': log.info("%s: Retry worked. Total tries: %d" % (cmd, attempts)) else: log.info("%s: Retry failed." % cmd) log.info("%s: times: %f %f %f %f" % (cmd, flush_time, cmd_time, echo_time, value_time)) return retval def get_version(self): log.debug("Get firmware version") return self.command("VERSION") def reboot(self): # Reboot outputs the following (after the reboot): # .................... # <blank line> # Rainwise CC-3000 Version: 1.3 Build 022 Dec 02 2016 # Flash ID 202015 # Initializing memory...OK. log.debug("Rebooting CC3000.") self.send_cmd("REBOOT") time.sleep(5) dots = self.read() blank = self.read() ver = self.read() flash_id = self.read() init_msg = self.read() return [dots, blank, ver, flash_id, init_msg] # give the station some time to wake up. when we first hit it with a # command, it often responds with an empty string. then subsequent # commands get the proper response. so for a first command, send something # innocuous and wait a bit. hopefully subsequent commands will then work. # NOTE: This happens periodically and does not appear to be related to # "waking up". Getter commands now retry, so removing the sleep. def wakeup(self): self.command('ECHO=?') def set_echo(self, cmd='ON'): log.debug("Set echo to %s" % cmd) data = self.command('ECHO=%s' % cmd) if data != 'OK': raise weewx.WeeWxIOError("Set ECHO failed: %s" % data) def get_header(self): log.debug("Get header") data = self.command("HEADER") cols = data.split(',') if cols[0] != 'HDR': raise weewx.WeeWxIOError("Expected HDR, got %s" % cols[0]) return cols def set_auto(self): # auto does not echo the command self.send_cmd("AUTO") def get_current_data(self, send_now=True): data = '' if send_now: data = self.command("NOW") else: data = self.read() if data == 'NO DATA' or data == 'NO DATA RECEIVED': log.debug("No data from sensors") return [] return data.split(',') def get_time(self): # unlike all of the other accessor methods, the TIME command returns # OK after it returns the requested parameter. so we have to pop the # OK off the serial so it does not trip up other commands. 
log.debug("Get time") tstr = self.command("TIME=?") if tstr not in ['ERROR', 'OK']: data = self.read() if data != 'OK': raise weewx.WeeWxIOError("Failed to get time: %s, %s" % (tstr, data)) return tstr @staticmethod def _compose_set_time_command(): ts = time.time() tstr = time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(ts)) log.info("Set time to %s (%s)" % (tstr, ts)) return "TIME=%s" % tstr def set_time(self): s = self._compose_set_time_command() data = self.command(s) if data != 'OK': raise weewx.WeeWxIOError("Failed to set time to %s: %s" % (s, data)) def get_dst(self): log.debug("Get daylight saving") return self.command("DST=?") def set_dst(self, dst): log.debug("Set DST to %s" % dst) # Firmware 1.3 Build 022 Dec 02 2016 returns 3 lines (<input-dst>,'',OK) data = self.command("DST=%s" % dst) # echoed input dst if data != dst: raise weewx.WeeWxIOError("Failed to set DST to %s: %s" % (dst, data)) data = self.read() # read '' if data not in ['ERROR', 'OK']: data = self.read() # read OK if data != 'OK': raise weewx.WeeWxIOError("Failed to set DST to %s: %s" % (dst, data)) def get_units(self): log.debug("Get units") return self.command("UNITS=?") def set_units(self, units): log.debug("Set units to %s" % units) data = self.command("UNITS=%s" % units) if data != 'OK': raise weewx.WeeWxIOError("Failed to set units to %s: %s" % (units, data)) def get_interval(self): log.debug("Get logging interval") return int(self.command("LOGINT=?")) def set_interval(self, interval=5): log.debug("Set logging interval to %d minutes" % interval) data = self.command("LOGINT=%d" % interval) if data != 'OK': raise weewx.WeeWxIOError("Failed to set logging interval: %s" % data) def get_channel(self): log.debug("Get channel") return self.command("STATION") def set_channel(self, channel): log.debug("Set channel to %d" % channel) if channel < 0 or 3 < channel: raise ValueError("Channel must be 0-3") data = self.command("STATION=%d" % channel) if data != 'OK': raise weewx.WeeWxIOError("Failed to set channel: %s" % data) def get_charger(self): log.debug("Get charger") return self.command("CHARGER") def get_baro(self): log.debug("Get baro") return self.command("BARO") def set_baro(self, offset): log.debug("Set barometer offset to %d" % offset) if offset != '0': parts = offset.split('.') if (len(parts) != 2 or (not (len(parts[0]) == 2 and len(parts[1]) == 2) and not (len(parts[0]) == 3 and len(parts[1]) == 1))): raise ValueError("Offset must be 0, XX.XX (inHg), or XXXX.X (mbar)") data = self.command("BARO=%d" % offset) if data != 'OK': raise weewx.WeeWxIOError("Failed to set baro: %s" % data) def get_memory_status(self): # query for logger memory use. output is something like this: # 6438 bytes, 111 records, 0% log.debug("Get memory status") return self.command("MEM=?") def get_max(self): log.debug("Get max values") # Return outside temperature, humidity, pressure, wind direction, # wind speed, rainfall (daily total), station voltage, inside # temperature. return self.command("MAX=?").split(',') def reset_max(self): log.debug("Reset max values") data = self.command("MAX=RESET") if data != 'OK': raise weewx.WeeWxIOError("Failed to reset max values: %s" % data) def get_min(self): log.debug("Get min values") # Return outside temperature, humidity, pressure, wind direction, # wind speed, rainfall (ignore), station voltage, inside temperature. 
return self.command("MIN=?").split(',') def reset_min(self): log.debug("Reset min values") data = self.command("MIN=RESET") if data != 'OK': raise weewx.WeeWxIOError("Failed to reset min values: %s" % data) def get_history_usage(self): # return the number of records in the logger s = self.get_memory_status() if 'records' in s: return int(s.split(',')[1].split()[0]) return None def clear_memory(self): log.debug("Clear memory") data = self.command("MEM=CLEAR") # It's a long wait for the OK. With a greatly increased timeout # just for MEM=CLEAR, we should be able to read the OK. if data == 'OK': log.info("MEM=CLEAR succeeded.") else: raise weewx.WeeWxIOError("Failed to clear memory: %s" % data) def get_rain(self): log.debug("Get rain total") # Firmware 1.3 Build 022 Dec 02 2017 returns OK after the rain count # This is like TIME=? rstr = self.command("RAIN") if rstr not in ['ERROR', 'OK']: data = self.read() if data != 'OK': raise weewx.WeeWxIOError("Failed to get rain: %s" % data) return rstr def reset_rain(self): log.debug("Reset rain counter") data = self.command("RAIN=RESET") if data != 'OK': raise weewx.WeeWxIOError("Failed to reset rain: %s" % data) def gen_records_since_ts(self, header, sensor_map, since_ts): if since_ts is None: since_ts = 0.0 num_records = 0 else: now_ts = time.mktime(datetime.datetime.now().timetuple()) nseconds = now_ts - since_ts nminutes = math.ceil(nseconds / 60.0) num_records = math.ceil(nminutes / float(self.get_interval())) if num_records == 0: log.debug('gen_records_since_ts: Asking for all records.') else: log.debug('gen_records_since_ts: Asking for %d records.' % num_records) for r in self.gen_records(nrec=num_records): pkt = CC3000Driver._parse_values(r[1:], header, sensor_map, "%Y/%m/%d %H:%M") if 'dateTime' in pkt and pkt['dateTime'] > since_ts: yield pkt def gen_records(self, nrec=0): """ Generator function for getting nrec records from the device. A value of 0 indicates all records. The CC3000 returns a header ('HDR,'), the archive records we are interested in ('REC,'), daily max and min records ('MAX,', 'MIN,') as well as messages for various events such as a reboot ('MSG,'). Things get interesting when nrec is non-zero. DOWNLOAD=n returns the latest n records in memory. The CC3000 does not distinguish between REC, MAX, MIN and MSG records in memory.
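# gen_records_since_ts() above converts "seconds since the last archive record" into a
# DOWNLOAD=n count by rounding up to whole minutes and then to whole logging intervals. The
# standalone helper below reproduces that arithmetic so the rounding behaviour is easy to
# check; it is an illustration of the calculation, not part of the CC3000 driver itself.
import math

def records_to_request(now_ts, since_ts, interval_minutes):
    """Number of archive records needed to cover the gap between since_ts and now_ts."""
    if since_ts is None:
        return 0  # 0 means "ask for all records", matching the driver's convention
    nseconds = now_ts - since_ts
    nminutes = math.ceil(nseconds / 60.0)
    return int(math.ceil(nminutes / float(interval_minutes)))

if __name__ == '__main__':
    # 17 minutes of missed data with a 5-minute logging interval -> 4 records
    print(records_to_request(now_ts=1000000 + 17 * 60, since_ts=1000000, interval_minutes=5))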
<reponame>clreda/patterning-scenario # -*- coding: utf-8 -*- from time import time from z3 import * import numpy as np from formula_to_truthtable import dict_to_tt from utils import * from build_conditions import * from patterns import precompute_pattern, precompute_diffusion from globals import change_grn ########################## ## SOLUTION PROCESSING ## ########################## #' Generate a model from a solution returned by the solver #' #' @param s Z3 solver object #' @param nexp number of trajectories #' @param m number of binary variables #' @param npatt number of possible patterning functions #' @param nfields number of fields #' @param k maximum length of trajectories #' @param nsteps maximum number of selected patterns #' @param bv_vars dictionary of bit-vectors describing the (potential) solutions #' @param level_idx_lists lists of #' ([gene identifier, updated expression value, corresponding binary variable], #' list of same-gene binary variable indices) for each gene #' @param multi_binary_dict dictionary which contains the full mapping from binary #' variables to real genes and the way back #' @param bvectors enumeration of Boolean vectors for the binary variables in the model #' @param res_list list of solution models so far #' @param pattern_matrix0 if not None, allows predictions using the patterning matrix #' (of selected patterning functions) provided #' @param patterns dictionary of patterning functions #' @param uniqueness character string for solution uniqueness #' @param verbose logical for printing messages #' @param debug logical for printing (hopefully) useful insights to the code #' @return res [updated solution list, updated solver with uniqueness condition, #' logical indicating if the solver should stop looking for conditions] def get_solutions(s, nexp, m, npatt, nfields, k, nsteps, bv_vars, level_idx_lists, multi_binary_dict, bvectors, patterns, uniqueness, res_list=[], pattern_matrix0=None, verbose=False, debug=False): no_more_model = False M = None print("\n-------------------------\n-- The instance is " + str(s.check()) + "istiable") try: M = s.model() except: if (res_list): print("\nNo other model found.") else: print("\nNo model found.") no_more_model = True return([res_list, s, no_more_model]) if (M): print("\nMODEL #" + str(len(res_list)+1) + " :") if (pattern_matrix0 == None): ## COMPUTATION OF THE PATTERN MATRIX (patterning function selection) ## pattern_matrix[p, t] = is patterning function #p applied ## at time step t? == 1 iff. bit-vector Pattern^(t=t,u)_p > 0 ## iff. 
patterning[t]_p == true pattern_matrix = np.zeros((npatt, nsteps)) for t in range(nsteps): vect = get_binary_dec(M[bv_vars.get("patterning")[t]], npatt) for p in range(npatt): pattern_matrix[p, t] = int(vect[p]) else: pattern_matrix = pattern_matrix0 if (debug): print("\n* Pattern matrix:") print("t= " + concat([str(i)+" "*(3-len(str(i))+1) for i in range(nsteps)])) print(pattern_matrix) if (verbose): print("\n** Summary") for t in range(nsteps): selected = filter(lambda p: pattern_matrix[p, t]==1, range(npatt)) ## Print selected patterning functions if (not selected): print("t=" + str(t) + ": No pattern function applied") else: get_vgene = lambda i : multi_binary_dict.get("vgenes")[ multi_binary_dict.get("map_to_vgenes")[ level_idx_lists[i][1][0] ][0] ][0] morphogens = map(get_vgene, selected) print("t=" + str(t) + ": morphogen" + ifthenelse(len(selected)>1, "s", "") + " " + concat(morphogens, ", ")) ## Prints diffusion for each morphogen print(concat(["Field #" + str(i) for i in range(nfields)], " | ")) for i in range(len(selected)): print(morphogens[i] + ": " + concat([str(x[1]) for x in level_idx_lists[selected[i]][0]], " | ")) ## COMPUTATION OF THE TRAJECTORY MATRIX ## (i.e. evolution of system state through time and position) ## trajectories[exp, t, n] = system state (of size m = #genes) at step #t, ## in field #n, in experiment #exp trajectories = np.zeros((nexp, k+1, nfields, m)) for e in range(nexp): for t in range(k+1): for n in range(nfields): vect = get_binary_dec(M[bv_vars.get("states")[e][t][n]], m) trajectories[e, t, n, 0:] = np.reshape(map(int, vect), (1, m)) if (verbose): print("\n* Trajectories") for exp in range(len(bv_vars.get('states'))): print("\n** Trajectory #" + str(exp+1)) for t in range(k+1): print("\n-- Time step t=" + str(t)) print(concat(multi_binary_dict.get("genes"), " | ")) for n in range(nfields): print(get_binary_dec(M[bv_vars.get('states')[exp][t][n]], m)) if (debug): print("\n* State transitions") for exp in range(len(bv_vars.get('states'))): print("\n** Trajectory #" + str(exp+1)) for n in range(nfields): print("\n* Field #" + str(n+1)) print("-- Time:" + " "*len(str(k)) + concat(multi_binary_dict.get("genes"), " | ")) for t in range(k+1): pr = lambda name : [get_binary_dec(M[bv_vars.get(name)[exp][t][n]], m)[i] + " "*(len(multi_binary_dict.get("genes")[i])-1) for i in range(m)] print("-- t = " + str(t) + " "*(len(str(k))-len(str(t))) + ": " + concat(pr("states"), " | ")) if (t < k): if (len(bv_vars.get('patterning')) > t): patt_select = get_binary_dec(M[bv_vars.get('patterning')[t]], npatt) print("Patterning selection: [" + patt_select + "]") lst = filter_nones([ifthenelse(int(patt_select[i]), patterns[i]['morphogen']) for i in range(npatt)]) if (len(lst)>0): print("Morphogen(s): " + concat(lst, ",")) else: print("Morphogen(s): none") print("=> (Patt)" + " "*len(str(k)) + concat(pr("updated_states"), " | ")) print("=> (GRNs)" + " "*len(str(k)) + concat(pr("diffusion_states"), " | ")) print("=> (Diff) -> ") ## GET THE RESULTS OF GRNs ## (if change_grn == True) if ("grns_bv" in bv_vars): grns = np.zeros((nexp, k, nfields, len(bvectors), len(multi_binary_dict["genes"]))) all_grns = [ffkq for q in bv_vars.get("grns_bv") for kq in q for fkq in kq for ffkq in fkq] #print([M[x] for x in all_grns]) for x in bv_vars.get("grns_bv"): for t in range(k): for n in range(nfields): for j in range(m): vect = get_binary_dec(M.evaluate(x[t][n][j], model_completion=False), len(bvectors)) grns[e, t, n, 0:, j] = np.reshape(map(int, vect), (1, len(bvectors))) if (verbose): 
print("\n* GRNs") for exp in range(nexp): print("\n** GRN in exp=" + str(exp+1)) for t in range(k): print("\n-- Time step t=" + str(t)) for n in range(nfields): print("\n-- Field #" + str(n+1)) for j in range(m): print("\n" + concat(multi_binary_dict.get("genes"), " | ")) for b in range(len(bvectors)): print(concat(map(str, bvectors[b]), " | ") + ":" + get_binary_dec( M[bv_vars.get('grns_bv')[exp][t][n][j]], len(bvectors))[b] + " for gene " + multi_binary_dict["genes"][j]) else: grns = None res_list.append([pattern_matrix, trajectories, grns]) if ("patterning" in uniqueness and pattern_matrix0==None): ## Uniqueness of models: selection of patterning ## ## Add condition patterning != SOLUTION[patterning] ## print("\n-- Adding uniqueness condition for patterning variables") s = difference_model(s, bv_vars, M, "patterning", verbose=verbose, debug=debug) if ("grns" in uniqueness and "grns_bv" in bv_vars.keys()): ## Uniqueness of models: selection of GRNs ## ## Add condition {GRNs} != {SOLUTION[GRNs]} ## print("\n-- Adding uniqueness condition for GRN variables") all_grns = [ffkq for q in bv_vars.get("grns_bv") for kq in q for fkq in kq for ffkq in fkq] bv_vars.update({"all_grns": all_grns}) s = difference_model(s, bv_vars, M, "all_grns", verbose=verbose, debug=debug) if ("states" in uniqueness): ## Uniqueness of models: state trajectories ## ## Add condition {states} != {SOLUTION[states]} ## print("\n-- Adding uniqueness condition for state variables") all_states = [fkq for q in bv_vars.get("states") for kq in q for fkq in kq] bv_vars.update({"all_states": all_states}) s = difference_model(s, bv_vars, M, "all_states", verbose=verbose, debug=debug) return([res_list, s, no_more_model]) ########################## ## PATTERNING INFERENCE ## ########################## #' Solve an instance of the patterning inference problem #' #' Given a set of putative patterning factors, the number of steps, #' some info about experimental data, find the order, the morphogen(s), #' the diffusion type and the source of the patterning agent. 
#' #' @param params output of read_files function in models.py #' that describes the model and the observations #' @param pattern_matrix0 if not None, allows predictions using the patterning matrix #' (of selected patterning functions) provided #' @param solmax if not None, the solver will enumerate up to solmax solutions #' @return res_list list of models + list of possible patterns + mapping between binary and multi-level variables def pattern_solver(params, pattern_matrix0=None, solmax=None, verbose=False, debug=False): [genes, patterns, directives, constants, fields, Observations, Fixpoint, GRNs] = params multi_binary_dict = multi_to_binary(genes) nfields = len(fields) m = len(multi_binary_dict.get("genes")) k = directives.get("nsteps") uniqueness = directives.get("uniqueness") diffusion_rate = constants.get("diffusion-rate") ## Pattern functions should be selected before this step patterning_step = directives.get("patterning_step") npatt = len(patterns) exp_names = list(set([oo["name"] for oo in Observations])) nexp = len(exp_names) ngrns = len(GRNs.items()) solmax = ifthenelse(not solmax, directives.get("limit"), solmax) if (not(pattern_matrix0==None)): ## Uniqueness in pattern selection is not required ## Need uniqueness at least for state trajectories uniqueness = filter(lambda x:not(x == "patterning"), list(set(uniqueness+['states']))) if (verbose): print("\nPARAMETER VALUES:") lstt = [multi_binary_dict.get("genes"), ["id " + str(f[0]) + " = " + concat(f[1], ",") for f in fields], nfields, m, k, npatt, solmax, nexp, ngrns, patterning_step, aggregation_function, application_function, uniqueness, max_nb_patterns_per_level, max_nb_patterns, min_nb_patterns, max_nb_pattern_times, change_grn, diffusion_rate] lst = ["genes", "fields", "#fields", "#genes", "#steps", "#patterns", "solmax", "#experiments", "#grns", "patterning end step", "aggregation", "application", "uniqueness", "max. #patt/step", "max. #patt", "min. #patt", "max. #times/patt", "allowing GRN changes?", "diffusion rate"] for i in range(len(lst)): print(lst[i] + ": " + str(lstt[i])) #____________________________________________________# # Initialization of constants and variables # #____________________________________________________# s = Solver() ## Patterning selection matrix   ## ## Same for every experiment ## ## patterning_step should be <= nsteps ## nsteps = ifthenelse(patterning_step==0, k, min(patterning_step, k)) if (pattern_matrix0 == None): patterning = [BitVec('Step^' + str(i), npatt) for i in range(nsteps)] else: _, nsteps = np.shape(pattern_matrix0) pattern_selection = [filter(lambda j: pattern_matrix0[j, i]==1, range(npatt)) for i in range(nsteps)] patterning = [idxList2BV(pattern_selection[i], npatt) for i in range(nsteps)] ## Phenotype for each field at each step ## state_name = lambda i, e, j : 'State^' + str(i) + '_(' + str(e) + ', ' + str(j) + ')' states = [[[BitVec(state_name(i, e, j), m) for j in range(nfields)] for i in range(k+1)] for e in range(nexp)] updated_states = [] diffusion_states = [] ## Pattern vectors for each possible patterning function pattern_name = lambda i, j, p : 'Pattern^(t=' + str(i) + ',' + str(j) + ')_' + str(p) ei_ls = [[[BitVec(pattern_name(i, j, p), m) for j in range(nfields)] for i in range(nsteps)] for p in range(npatt)] patterning_name = lambda i, j : 'Pattern^(t=' + str(i) + ',' + str(j) + ')' e_ls = [[BitVec(patterning_name(i, j), m) for j in range(nfields)] for i in range(nsteps)] ## Diffusion vectors from each possible gene product from each possible source field
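# get_solutions() above decodes each Pattern/State bit-vector in the Z3 model with a helper
# called get_binary_dec(), presumably imported from utils and not shown in this excerpt. The
# sketch below shows one plausible version of that decoding step in isolation: it turns a
# model value for an n-bit BitVec into a list of 0/1 ints, which is how pattern_matrix and
# trajectories are filled. The most-significant-bit-first ordering here is an assumption;
# the real helper may index bits differently.
from z3 import BitVec, Solver, sat

def bits_of(model, bv, width):
    """Return the bits of a BitVec model value as a list of ints, most significant first."""
    value = model[bv].as_long()
    return [(value >> (width - 1 - i)) & 1 for i in range(width)]

if __name__ == '__main__':
    patt = BitVec('Step^0', 4)
    s = Solver()
    s.add(patt == 0b1010)
    assert s.check() == sat
    print(bits_of(s.model(), patt, 4))  # [1, 0, 1, 0]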
<filename>sysroot/usr/lib/python3/dist-packages/apt/cache.py # cache.py - apt cache abstraction # # Copyright (c) 2005-2009 Canonical # # Author: <NAME> <<EMAIL>> # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 # USA from __future__ import print_function import fnmatch import os import warnings import weakref try: from typing import (Any, Callable, Dict, Iterator, List, Optional, Set, Tuple, Union, cast, KeysView) Any # pyflakes Callable # pyflakes Dict # pyflakes Iterator # pyflakes KeysView # pyflakes List # pyflakes Optional # pyflakes Set # pyflakes Tuple # pyflakes Union # pyflakes except ImportError: def cast(typ, obj): # type: ignore return obj pass import apt_pkg from apt.package import Package, Version import apt.progress.text from apt.progress.base import AcquireProgress, InstallProgress, OpProgress OpProgress # pyflakes InstallProgress # pyflakes AcquireProgress # pyflakes Version # pyflakes class FetchCancelledException(IOError): """Exception that is thrown when the user cancels a fetch operation.""" class FetchFailedException(IOError): """Exception that is thrown when fetching fails.""" class UntrustedException(FetchFailedException): """Exception that is thrown when fetching fails for trust reasons""" class LockFailedException(IOError): """Exception that is thrown when locking fails.""" class CacheClosedException(Exception): """Exception that is thrown when the cache is used after close().""" class _WrappedLock(object): """Wraps an apt_pkg.FileLock to raise LockFailedException. Initialized using a directory path.""" def __init__(self, path): # type: (str) -> None self._path = path self._lock = apt_pkg.FileLock(os.path.join(path, "lock")) def __enter__(self): # type: () -> None try: return self._lock.__enter__() except apt_pkg.Error as e: raise LockFailedException(("Failed to lock directory %s: %s") % (self._path, e)) def __exit__(self, typ, value, traceback): # type: (object, object, object) -> None return self._lock.__exit__(typ, value, traceback) class Cache(object): """Dictionary-like package cache. The APT cache file contains a hash table mapping names of binary packages to their metadata. A Cache object is the in-core representation of the same. It provides access to APTs idea of the list of available packages. The cache can be used like a mapping from package names to Package objects (although only getting items is supported). Keyword arguments: progress -- a OpProgress object, rootdir -- an alternative root directory. if that is given the system sources.list and system lists/files are not read, only file relative to the given rootdir, memonly -- build the cache in memory only. .. versionchanged:: 1.0 The cache now supports package names with special architecture qualifiers such as :all and :native. It does not export them in :meth:`keys()`, though, to keep :meth:`keys()` a unique set. 
""" def __init__(self, progress=None, rootdir=None, memonly=False): # type: (OpProgress, str, bool) -> None self._cache = cast(apt_pkg.Cache, None) # type: apt_pkg.Cache self._depcache = cast(apt_pkg.DepCache, None) # type: apt_pkg.DepCache self._records = cast(apt_pkg.PackageRecords, None) # type: apt_pkg.PackageRecords # nopep8 self._list = cast(apt_pkg.SourceList, None) # type: apt_pkg.SourceList self._callbacks = {} # type: Dict[str, List[Union[Callable[..., None],str]]] # nopep8 self._callbacks2 = {} # type: Dict[str, List[Tuple[Callable[..., Any], Tuple[Any, ...], Dict[Any,Any]]]] # nopep8 self._weakref = weakref.WeakValueDictionary() # type: weakref.WeakValueDictionary[str, apt.Package] # nopep8 self._weakversions = weakref.WeakSet() # type: weakref.WeakSet[Version] # nopep8 self._changes_count = -1 self._sorted_set = None # type: Optional[List[str]] self.connect("cache_post_open", "_inc_changes_count") self.connect("cache_post_change", "_inc_changes_count") if memonly: # force apt to build its caches in memory apt_pkg.config.set("Dir::Cache::pkgcache", "") if rootdir: rootdir = os.path.abspath(rootdir) if os.path.exists(rootdir + "/etc/apt/apt.conf"): apt_pkg.read_config_file(apt_pkg.config, rootdir + "/etc/apt/apt.conf") if os.path.isdir(rootdir + "/etc/apt/apt.conf.d"): apt_pkg.read_config_dir(apt_pkg.config, rootdir + "/etc/apt/apt.conf.d") apt_pkg.config.set("Dir", rootdir) apt_pkg.config.set("Dir::State::status", rootdir + "/var/lib/dpkg/status") # also set dpkg to the rootdir path so that its called for the # --print-foreign-architectures call apt_pkg.config.set("Dir::bin::dpkg", os.path.join(rootdir, "usr", "bin", "dpkg")) # create required dirs/files when run with special rootdir # automatically self._check_and_create_required_dirs(rootdir) # Call InitSystem so the change to Dir::State::Status is actually # recognized (LP: #320665) apt_pkg.init_system() # Prepare a lock object (context manager for archive lock) archive_dir = apt_pkg.config.find_dir("Dir::Cache::Archives") self._archive_lock = _WrappedLock(archive_dir) self.open(progress) def fix_broken(self): # type: () -> None """Fix broken packages.""" self._depcache.fix_broken() def _inc_changes_count(self): # type: () -> None """Increase the number of changes""" self._changes_count += 1 def _check_and_create_required_dirs(self, rootdir): # type: (str) -> None """ check if the required apt directories/files are there and if not create them """ files = ["/var/lib/dpkg/status", "/etc/apt/sources.list", ] dirs = ["/var/lib/dpkg", "/etc/apt/", "/var/cache/apt/archives/partial", "/var/lib/apt/lists/partial", ] for d in dirs: if not os.path.exists(rootdir + d): #print "creating: ", rootdir + d os.makedirs(rootdir + d) for f in files: if not os.path.exists(rootdir + f): open(rootdir + f, "w").close() def _run_callbacks(self, name): # type: (str) -> None """ internal helper to run a callback """ if name in self._callbacks: for callback in self._callbacks[name]: if callback == '_inc_changes_count': self._inc_changes_count() else: callback() # type: ignore if name in self._callbacks2: for callback, args, kwds in self._callbacks2[name]: callback(self, *args, **kwds) def open(self, progress=None): # type: (OpProgress) -> None """ Open the package cache, after that it can be used like a dictionary """ if progress is None: progress = apt.progress.base.OpProgress() # close old cache on (re)open self.close() self.op_progress = progress self._run_callbacks("cache_pre_open") self._cache = apt_pkg.Cache(progress) self._depcache = 
apt_pkg.DepCache(self._cache) self._records = apt_pkg.PackageRecords(self._cache) self._list = apt_pkg.SourceList() self._list.read_main_list() self._sorted_set = None self.__remap() self._have_multi_arch = len(apt_pkg.get_architectures()) > 1 progress.done() self._run_callbacks("cache_post_open") def __remap(self): # type: () -> None """Called after cache reopen() to relocate to new cache. Relocate objects like packages and versions from the old underlying cache to the new one. """ for key in list(self._weakref.keys()): try: pkg = self._weakref[key] except KeyError: continue try: pkg._pkg = self._cache[pkg._pkg.name, pkg._pkg.architecture] except LookupError: del self._weakref[key] for ver in list(self._weakversions): # Package has been reseated above, reseat version for v in ver.package._pkg.version_list: # Requirements as in debListParser::SameVersion if (v.hash == ver._cand.hash and (v.size == 0 or ver._cand.size == 0 or v.size == ver._cand.size) and v.multi_arch == ver._cand.multi_arch and v.ver_str == ver._cand.ver_str): ver._cand = v break else: self._weakversions.remove(ver) def close(self): # type: () -> None """ Close the package cache """ # explicitely free the FDs that _records has open del self._records self._records = cast(apt_pkg.PackageRecords, None) def __enter__(self): # type: () -> Cache """ Enter the with statement """ return self def __exit__(self, exc_type, exc_value, traceback): # type: (object, object, object) -> None """ Exit the with statement """ self.close() def __getitem__(self, key): # type: (object) -> Package """ look like a dictionary (get key) """ try: key = str(key) rawpkg = self._cache[key] except KeyError: raise KeyError('The cache has no package named %r' % key) # It might be excluded due to not having a version or something if not self.__is_real_pkg(rawpkg): raise KeyError('The cache has no package named %r' % key) pkg = self._rawpkg_to_pkg(rawpkg) return pkg def get(self, key, default=None): # type: (object, object) -> Any """Return *self*[*key*] or *default* if *key* not in *self*. .. versionadded:: 1.1 """ try: return self[key] except KeyError: return default def _rawpkg_to_pkg(self, rawpkg): # type: (apt_pkg.Package) -> Package """Returns the apt.Package object for an apt_pkg.Package object. .. versionadded:: 1.0.0 """ fullname = rawpkg.get_fullname(pretty=True) return self._weakref.setdefault(fullname, Package(self, rawpkg)) def __iter__(self): # type: () -> Iterator[Package] # We iterate sorted over package names here. With this we read the # package lists linearly if we need to access the package records, # instead of having to do thousands of random seeks; the latter # is disastrous if we use compressed package indexes, and slower than # necessary for uncompressed indexes. for pkgname in self.keys(): pkg = Package(self, self._cache[pkgname]) yield self._weakref.setdefault(pkgname, pkg) def __is_real_pkg(self, rawpkg): # type: (apt_pkg.Package) -> bool """Check if the apt_pkg.Package provided is a real package.""" return rawpkg.has_versions def has_key(self, key): # type: (object) -> bool return key in self def __contains__(self, key): # type: (object) -> bool try: return self.__is_real_pkg(self._cache[str(key)]) except KeyError: return False def __len__(self): # type: () -> int return len(self.keys())
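# Cache._rawpkg_to_pkg() above memoizes Package wrappers in a weakref.WeakValueDictionary so
# that repeated lookups of the same package name return the same wrapper object while still
# letting unused wrappers be garbage-collected (and remapped on reopen). The snippet below
# demonstrates that setdefault-on-a-WeakValueDictionary pattern in isolation; the Wrapper
# class is a stand-in for illustration, not apt's Package type.
import weakref

class Wrapper(object):
    def __init__(self, name):
        self.name = name

_cache = weakref.WeakValueDictionary()

def get_wrapper(name):
    """Return a shared Wrapper for name, creating one only if no live instance exists."""
    return _cache.setdefault(name, Wrapper(name))

if __name__ == '__main__':
    a = get_wrapper('apt')
    b = get_wrapper('apt')
    print(a is b)   # True: the same live object is reused
    del a, b        # once unreferenced, the entry can silently disappear from the mapping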
-> builtins.bool: return not (rhs == self) def __repr__(self) -> str: return "SpacerProps(%s)" % ", ".join( k + "=" + repr(v) for k, v in self._values.items() ) @jsii.enum(jsii_type="@aws-cdk/aws-cloudwatch.Statistic") class Statistic(enum.Enum): """Statistic to use over the aggregation period.""" SAMPLE_COUNT = "SAMPLE_COUNT" """The count (number) of data points used for the statistical calculation.""" AVERAGE = "AVERAGE" """The value of Sum / SampleCount during the specified period.""" SUM = "SUM" """All values submitted for the matching metric added together. This statistic can be useful for determining the total volume of a metric. """ MINIMUM = "MINIMUM" """The lowest value observed during the specified period. You can use this value to determine low volumes of activity for your application. """ MAXIMUM = "MAXIMUM" """The highest value observed during the specified period. You can use this value to determine high volumes of activity for your application. """ @jsii.data_type( jsii_type="@aws-cdk/aws-cloudwatch.TextWidgetProps", jsii_struct_bases=[], name_mapping={"markdown": "markdown", "height": "height", "width": "width"}, ) class TextWidgetProps: def __init__( self, *, markdown: builtins.str, height: typing.Optional[jsii.Number] = None, width: typing.Optional[jsii.Number] = None, ) -> None: """Properties for a Text widget. :param markdown: The text to display, in MarkDown format. :param height: Height of the widget. Default: 2 :param width: Width of the widget, in a grid of 24 units wide. Default: 6 """ self._values: typing.Dict[str, typing.Any] = { "markdown": markdown, } if height is not None: self._values["height"] = height if width is not None: self._values["width"] = width @builtins.property def markdown(self) -> builtins.str: """The text to display, in MarkDown format.""" result = self._values.get("markdown") assert result is not None, "Required property 'markdown' is missing" return result @builtins.property def height(self) -> typing.Optional[jsii.Number]: """Height of the widget. :default: 2 """ result = self._values.get("height") return result @builtins.property def width(self) -> typing.Optional[jsii.Number]: """Width of the widget, in a grid of 24 units wide. 
:default: 6 """ result = self._values.get("width") return result def __eq__(self, rhs: typing.Any) -> builtins.bool: return isinstance(rhs, self.__class__) and rhs._values == self._values def __ne__(self, rhs: typing.Any) -> builtins.bool: return not (rhs == self) def __repr__(self) -> str: return "TextWidgetProps(%s)" % ", ".join( k + "=" + repr(v) for k, v in self._values.items() ) @jsii.enum(jsii_type="@aws-cdk/aws-cloudwatch.TreatMissingData") class TreatMissingData(enum.Enum): """Specify how missing data points are treated during alarm evaluation.""" BREACHING = "BREACHING" """Missing data points are treated as breaching the threshold.""" NOT_BREACHING = "NOT_BREACHING" """Missing data points are treated as being within the threshold.""" IGNORE = "IGNORE" """The current alarm state is maintained.""" MISSING = "MISSING" """The alarm does not consider missing data points when evaluating whether to change state.""" @jsii.enum(jsii_type="@aws-cdk/aws-cloudwatch.Unit") class Unit(enum.Enum): """Unit for metric.""" SECONDS = "SECONDS" """Seconds.""" MICROSECONDS = "MICROSECONDS" """Microseconds.""" MILLISECONDS = "MILLISECONDS" """Milliseconds.""" BYTES = "BYTES" """Bytes.""" KILOBYTES = "KILOBYTES" """Kilobytes.""" MEGABYTES = "MEGABYTES" """Megabytes.""" GIGABYTES = "GIGABYTES" """Gigabytes.""" TERABYTES = "TERABYTES" """Terabytes.""" BITS = "BITS" """Bits.""" KILOBITS = "KILOBITS" """Kilobits.""" MEGABITS = "MEGABITS" """Megabits.""" GIGABITS = "GIGABITS" """Gigabits.""" TERABITS = "TERABITS" """Terabits.""" PERCENT = "PERCENT" """Percent.""" COUNT = "COUNT" """Count.""" BYTES_PER_SECOND = "BYTES_PER_SECOND" """Bytes/second (B/s).""" KILOBYTES_PER_SECOND = "KILOBYTES_PER_SECOND" """Kilobytes/second (kB/s).""" MEGABYTES_PER_SECOND = "MEGABYTES_PER_SECOND" """Megabytes/second (MB/s).""" GIGABYTES_PER_SECOND = "GIGABYTES_PER_SECOND" """Gigabytes/second (GB/s).""" TERABYTES_PER_SECOND = "TERABYTES_PER_SECOND" """Terabytes/second (TB/s).""" BITS_PER_SECOND = "BITS_PER_SECOND" """Bits/second (b/s).""" KILOBITS_PER_SECOND = "KILOBITS_PER_SECOND" """Kilobits/second (kb/s).""" MEGABITS_PER_SECOND = "MEGABITS_PER_SECOND" """Megabits/second (Mb/s).""" GIGABITS_PER_SECOND = "GIGABITS_PER_SECOND" """Gigabits/second (Gb/s).""" TERABITS_PER_SECOND = "TERABITS_PER_SECOND" """Terabits/second (Tb/s).""" COUNT_PER_SECOND = "COUNT_PER_SECOND" """Count/second.""" NONE = "NONE" """No unit.""" @jsii.data_type( jsii_type="@aws-cdk/aws-cloudwatch.YAxisProps", jsii_struct_bases=[], name_mapping={ "label": "label", "max": "max", "min": "min", "show_units": "showUnits", }, ) class YAxisProps: def __init__( self, *, label: typing.Optional[builtins.str] = None, max: typing.Optional[jsii.Number] = None, min: typing.Optional[jsii.Number] = None, show_units: typing.Optional[builtins.bool] = None, ) -> None: """Properties for a Y-Axis. :param label: The label. Default: - No label :param max: The max value. Default: - No maximum value :param min: The min value. Default: 0 :param show_units: Whether to show units. Default: true """ self._values: typing.Dict[str, typing.Any] = {} if label is not None: self._values["label"] = label if max is not None: self._values["max"] = max if min is not None: self._values["min"] = min if show_units is not None: self._values["show_units"] = show_units @builtins.property def label(self) -> typing.Optional[builtins.str]: """The label. :default: - No label """ result = self._values.get("label") return result @builtins.property def max(self) -> typing.Optional[jsii.Number]: """The max value. 
:default: - No maximum value """ result = self._values.get("max") return result @builtins.property def min(self) -> typing.Optional[jsii.Number]: """The min value. :default: 0 """ result = self._values.get("min") return result @builtins.property def show_units(self) -> typing.Optional[builtins.bool]: """Whether to show units. :default: true """ result = self._values.get("show_units") return result def __eq__(self, rhs: typing.Any) -> builtins.bool: return isinstance(rhs, self.__class__) and rhs._values == self._values def __ne__(self, rhs: typing.Any) -> builtins.bool: return not (rhs == self) def __repr__(self) -> str: return "YAxisProps(%s)" % ", ".join( k + "=" + repr(v) for k, v in self._values.items() ) @jsii.data_type( jsii_type="@aws-cdk/aws-cloudwatch.AlarmProps", jsii_struct_bases=[CreateAlarmOptions], name_mapping={ "evaluation_periods": "evaluationPeriods", "threshold": "threshold", "actions_enabled": "actionsEnabled", "alarm_description": "alarmDescription", "alarm_name": "alarmName", "comparison_operator": "comparisonOperator", "datapoints_to_alarm": "datapointsToAlarm", "evaluate_low_sample_count_percentile": "evaluateLowSampleCountPercentile", "period": "period", "statistic": "statistic", "treat_missing_data": "treatMissingData", "metric": "metric", }, ) class AlarmProps(CreateAlarmOptions): def __init__( self, *, evaluation_periods: jsii.Number, threshold: jsii.Number, actions_enabled: typing.Optional[builtins.bool] = None, alarm_description: typing.Optional[builtins.str] = None, alarm_name: typing.Optional[builtins.str] = None, comparison_operator: typing.Optional[ComparisonOperator] = None, datapoints_to_alarm: typing.Optional[jsii.Number] = None, evaluate_low_sample_count_percentile: typing.Optional[builtins.str] = None, period: typing.Optional[aws_cdk.core.Duration] = None, statistic: typing.Optional[builtins.str] = None, treat_missing_data: typing.Optional[TreatMissingData] = None, metric: IMetric, ) -> None: """Properties for Alarms. :param evaluation_periods: The number of periods over which data is compared to the specified threshold. :param threshold: The value against which the specified statistic is compared. :param actions_enabled: Whether the actions for this alarm are enabled. Default: true :param alarm_description: Description for the alarm. Default: No description :param alarm_name: Name of the alarm. Default: Automatically generated name :param comparison_operator: Comparison to use to check if metric is breaching. Default: GreaterThanOrEqualToThreshold :param datapoints_to_alarm: The number of datapoints that must be breaching to trigger the alarm. This is used only if you are setting an "M out of N" alarm. In that case, this value is the M. For more information, see Evaluating an Alarm in the Amazon CloudWatch User Guide. Default: ``evaluationPeriods`` :param evaluate_low_sample_count_percentile: Specifies whether to evaluate the data and potentially change the alarm state if there are too few data points to be statistically significant. Used only for alarms that are based on percentiles. Default: - Not configured. :param period: (deprecated) The period over which the specified statistic is applied. Cannot be used with ``MathExpression`` objects. Default: - The period from the metric :param statistic: (deprecated) What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Cannot be used with ``MathExpression`` objects. 
Default: - The statistic from the metric :param treat_missing_data: Sets how this alarm is to handle missing data points. Default: TreatMissingData.Missing :param metric: The metric to add the alarm on. Metric objects can be obtained from most resources, or you can construct custom Metric objects by instantiating one. """ self._values: typing.Dict[str, typing.Any] = { "evaluation_periods": evaluation_periods, "threshold": threshold, "metric": metric, } if actions_enabled is not None: self._values["actions_enabled"] = actions_enabled if alarm_description is not None: self._values["alarm_description"] = alarm_description if alarm_name is not None: self._values["alarm_name"] = alarm_name if comparison_operator is not None: self._values["comparison_operator"] = comparison_operator if datapoints_to_alarm is not None: self._values["datapoints_to_alarm"] = datapoints_to_alarm if evaluate_low_sample_count_percentile is not None: self._values["evaluate_low_sample_count_percentile"] = evaluate_low_sample_count_percentile if period is not None: self._values["period"] = period if statistic is not None: self._values["statistic"] = statistic if treat_missing_data is not None: self._values["treat_missing_data"] = treat_missing_data @builtins.property def evaluation_periods(self) -> jsii.Number: """The number of periods over which data is compared to the specified threshold.""" result = self._values.get("evaluation_periods") assert result is not None, "Required property 'evaluation_periods' is missing" return result @builtins.property def threshold(self) -> jsii.Number: """The value against which the specified statistic is compared.""" result = self._values.get("threshold") assert result is not None, "Required property 'threshold' is missing" return result @builtins.property def actions_enabled(self) -> typing.Optional[builtins.bool]: """Whether the actions for this alarm are enabled. :default: true """ result = self._values.get("actions_enabled") return result @builtins.property def alarm_description(self) -> typing.Optional[builtins.str]: """Description for
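The generated props classes above are plain value objects: required keys are asserted at read time, optional keys are stored in _values only when supplied, and __eq__/__repr__ work off that dict. A small sketch of that behavior, assuming the CDK v1 Python package aws-cdk.aws-cloudwatch is installed; the widget text and axis label are illustrative.

from aws_cdk import aws_cloudwatch as cw

# Sketch only: the CDK v1 import path and the literal values are assumptions.
text = cw.TextWidgetProps(markdown="# Service health", height=2, width=12)
axis = cw.YAxisProps(label="Latency (ms)", min=0, show_units=False)

# Equality is structural over _values, so omitting an optional key matters.
assert text == cw.TextWidgetProps(markdown="# Service health", height=2, width=12)
assert axis != cw.YAxisProps(label="Latency (ms)", min=0)

print(text)      # TextWidgetProps(markdown='# Service health', height=2, width=12)
print(axis.max)  # None: unset optional properties fall back to None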
from __future__ import division from .exceptions import (OutOfBoundsError, VoidIntervalError, UnacceptablePeriodError) from .workshift import Workshift from .core import _Frame, _Schedule, VOID_TIME class _BaseInterval(object): """Parent class for Interval and VoidInterval""" def __init__(self, timeboard, bounds, schedule=None): def handle_bound(bound): if isinstance(bound, Workshift): loc = bound._loc elif isinstance(bound, int): loc = bound else: raise TypeError('Interval bound = {}: expected integer or ' 'Workshift, received {}'. format(bound, type(bound))) if not 0 <= loc < len(timeboard._timeline): raise OutOfBoundsError( "Interval bound {} is outside timeboard {}". format(bound, timeboard.compact_str)) return loc if not hasattr(bounds, '__getitem__'): raise TypeError("`bounds` parameter must be list-like") try: bound0 = bounds[0] bound1 = bounds[1] except IndexError: raise IndexError("`bounds` value must contain two items") locs = (handle_bound(bound0), handle_bound(bound1)) self._tb = timeboard self._loc = locs try: is_void = self.__class__.IS_VOID except AttributeError: is_void = False if is_void: if locs[0] <= locs[1]: raise VoidIntervalError( 'Attempted to instantiate void interval with valid ' 'locations:' ' {!r}'.format(locs)) self._length = 0 else: if locs[0] > locs[1]: raise VoidIntervalError( 'Attempted to create empty interval with {!r}'.format(locs)) self._length = locs[1] - locs[0] + 1 if schedule is None: self._schedule = timeboard.default_schedule else: self._schedule = schedule if not isinstance(self._schedule, _Schedule): raise TypeError('Wrong type of schedule. Expected _Schedule,' ' received {}'.format(type(schedule))) class Interval(_BaseInterval): """A sequence of workshifts within the timeboard. Interval is defined by two positions on the timeline which are the zero-based sequence numbers of the first and the last workshifts of the interval. An interval can contain one or more workshifts; the empty interval is not allowed. Duty status of the workshifts within the interval is interpreted by the given schedule. In addition to the methods defined for intervals, you can use interval as a generator that yields the workshifts of the interval, from the first to the last. Parameters ---------- timeboard : :py:class:`.Timeboard` bounds : tuple(int, int) or tuple(Workshift, Workshift) The two elements of `bounds` provide the positions of the first and the last workshifts of the interval within the timeline. The element's type is either non-negative integer or :py:class:`.Workshift`. schedule : _Schedule, optional If not given, the timeboard's default schedule is used. Raises ------ VoidIntervalError If `bounds` are in the reverse order. OutOfBoundsError If any of `bounds` points outside the timeboard. Attributes ---------- start_time : Timestamp When the first workshift of the interval starts. end_time : Timestamp When the last workshift of the interval ends. length : int Number of workshifts in the interval. You can also call `len()` function for an interval which returns the same value. schedule : _Schedule Schedule used by interval's methods unless explicitly redefined in the method call. Use `name` attribute of `schedule` to review its identity. Examples -------- >>> clnd = tb.Timeboard('D', '30 Sep 2017', '15 Oct 2017', layout=[0,1]) >>> ivl = tb.interval.Interval(clnd, (2,9)) >>> ivl Interval((2, 9)): 'D' at 2017-10-02 -> 'D' at 2017-10-09 [8] >>> len(ivl) 8 >>> for ws in ivl: ...
print (ws.start_time, "\t", ws.label) 2017-10-02 00:00:00 0.0 2017-10-03 00:00:00 1.0 2017-10-04 00:00:00 0.0 2017-10-05 00:00:00 1.0 2017-10-06 00:00:00 0.0 2017-10-07 00:00:00 1.0 2017-10-08 00:00:00 0.0 2017-10-09 00:00:00 1.0 The following operations consume memory to hold the data for the entire interval. >>> list(ivl) [Workshift(2) of 'D' at 2017-10-02, Workshift(3) of 'D' at 2017-10-03, Workshift(4) of 'D' at 2017-10-04, Workshift(5) of 'D' at 2017-10-05, Workshift(6) of 'D' at 2017-10-06, Workshift(7) of 'D' at 2017-10-07, Workshift(8) of 'D' at 2017-10-08, Workshift(9) of 'D' at 2017-10-09] >>> print(ivl) Interval((2, 9)): 'D' at 2017-10-02 -> 'D' at 2017-10-09 [8] <BLANKLINE> ws_ref start duration end label on_duty loc 2 2017-10-02 2017-10-02 1 2017-10-02 0.0 False 3 2017-10-03 2017-10-03 1 2017-10-03 1.0 True 4 2017-10-04 2017-10-04 1 2017-10-04 0.0 False 5 2017-10-05 2017-10-05 1 2017-10-05 1.0 True 6 2017-10-06 2017-10-06 1 2017-10-06 0.0 False 7 2017-10-07 2017-10-07 1 2017-10-07 1.0 True 8 2017-10-08 2017-10-08 1 2017-10-08 0.0 False 9 2017-10-09 2017-10-09 1 2017-10-09 1.0 True See also -------- .Timeboard.get_interval : provides convenient ways to instantiate an interval instead of calling `Interval()` constructor directly. Moreover, in many cases, you can shortcut a :py:meth:`get_interval` call by calling the instance of :py:class:`Timeboard` itself. .workshifts Return the generator that yields workshifts with the specified duty. """ def _repr_schedule_label(self): schedule_label = self.schedule.name if schedule_label == self._tb.default_schedule.name: schedule_label = "" else: schedule_label = ", " + schedule_label return schedule_label @property def compact_str(self): return "Interval({!r}{}): {} -> {} [{}]".format( self._loc, self._repr_schedule_label(), Workshift(self._tb, self._loc[0]).compact_str, Workshift(self._tb, self._loc[1]).compact_str, self._length, ) def __repr__(self): return self.compact_str def __str__(self): return self.compact_str + "\n\n{}".format(self.to_dataframe()) @property def start_time(self): # TODO: Refactor. This class has to know methods of Timeboard only return self._tb._timeline.get_ws_start_time(self._loc[0]) @property def end_time(self): # TODO: Refactor. This class has to know methods of Timeboard only return self._tb._timeline.get_ws_end_time(self._loc[1]) @property def length(self): return self._length @property def schedule(self): return self._schedule def to_dataframe(self): """Convert interval into `pandas.Dataframe`. Each workshift is represented as a row. 
The dataframe has the following columns: ================ ===================================================== Column Explanation ================ ===================================================== 'loc' zero-based position of the workshift on the timeline 'workshift' the reference time of the workshift 'start' the start time of the workshift 'end' the start time of the workshift 'duration' the number of base units in the workshift 'label' workshift's label ================ ===================================================== Returns ------- pandas.DataFrame """ return self._tb.to_dataframe(self._loc[0], self._loc[1]) def _find_my_bounds_in_idx(self, idx): # TODO: optimize this search left_bound = 0 len_idx = len(idx) while left_bound < len_idx and idx[left_bound] < self._loc[0]: left_bound += 1 if left_bound == len_idx: return None, None right_bound = len(idx) - 1 while right_bound >= left_bound and idx[right_bound] > self._loc[1]: right_bound -= 1 if right_bound < left_bound: return None, None return left_bound, right_bound def _get_duty_idx(self, duty, schedule): _duty_idx = { 'on': schedule.on_duty_index, 'off': schedule.off_duty_index, 'any': schedule.index } try: duty_idx = _duty_idx[duty] except KeyError: raise ValueError('Invalid `duty` parameter {!r}'.format(duty)) if duty != 'any': duty_idx_bounds = self._find_my_bounds_in_idx(duty_idx) else: duty_idx_bounds = self._loc return duty_idx, duty_idx_bounds def workshifts(self, duty='on', schedule=None): """ Return the generator that yields workshifts with the specified duty. The workshifts are yielded in order from the first to the last. Parameters ---------- duty : {``'on'``, ``'off``', ``'any``'} , optional (default ``'on'``) Duty of the workshifts to be yielded. If ``duty='on'``, off-duty workshifts are skipped, and vice versa. If ``duty='any'``, every workshift in the interval is yielded. schedule : _Schedule, optional If `schedule` is not given, the interval's schedule is used. Returns ------- generator Examples -------- >>> clnd = tb.Timeboard('D', '30 Sep 2017', '15 Oct 2017', layout=[0,1]) >>> ivl = tb.interval.Interval(clnd, (2,9)) >>> ivl Interval((2, 9)): 'D' at 2017-10-02 -> 'D' at 2017-10-09 [8] >>> for ws in ivl.workshifts(): ... print (ws.start_time, "\t", ws.label) 2017-10-03 00:00:00 1 2017-10-05 00:00:00 1 2017-10-07 00:00:00 1 2017-10-09 00:00:00 1 >>> list(ivl.workshifts(duty='off')) [Workshift(2) of 'D' at 2017-10-02, Workshift(4) of 'D' at 2017-10-04, Workshift(6) of 'D' at 2017-10-06, Workshift(8) of 'D' at 2017-10-08] """ if schedule is None: schedule = self.schedule duty_idx, duty_idx_bounds = self._get_duty_idx(duty, schedule) if duty_idx_bounds[0] is None or duty_idx_bounds[1] is None: return for i in duty_idx[duty_idx_bounds[0] : duty_idx_bounds[1] + 1]: yield Workshift(self._tb, i, schedule=schedule) def __next__(self): return next(self._ws_generator_activated) def next(self): return self.__next__() def __iter__(self): self._ws_generator_activated = self.workshifts(duty='any') return self def __len__(self): return self.length def nth(self, n, duty='on', schedule=None): """Return n-th workshift with the specified duty in the interval. Parameters ---------- n : int Zero-based sequence number of the workshift with the specified duty within the interval. Negative values count from the end toward the beginning of the interval (`n=-1` returns the last workshift with the specified duty). duty : {``'on'``, ``'off``', ``'any``'} , optional (default ``'on'``) Duty of workshifts to be counted. 
If ``duty='on'``, off-duty workshifts are ignored, and vice versa. If ``duty='any'``, all workshifts are counted whatever
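Pulling the docstring examples above together, here is a short sketch of duty-aware queries on an interval; it assumes the same 16-day example calendar and uses only methods documented above.

import timeboard as tb

# Sketch based on the docstrings above; the calendar matches their example.
clnd = tb.Timeboard('D', '30 Sep 2017', '15 Oct 2017', layout=[0, 1])
ivl = tb.interval.Interval(clnd, (2, 9))

print(len(ivl))                          # 8 workshifts in total
print(ivl.nth(0))                        # first on-duty workshift (Oct 3 in this layout)
print(ivl.nth(-1, duty='off'))           # last off-duty workshift (Oct 8)
print(list(ivl.workshifts(duty='off')))  # all four off-duty workshifts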
import json from random import randint from typing import Callable, Dict, Optional from unittest.mock import patch import pytest from botocore.config import Config from botocore.stub import Stubber from aws_lambda_powertools.utilities.batch import ( BatchProcessor, EventType, PartialSQSProcessor, batch_processor, sqs_batch_processor, ) from aws_lambda_powertools.utilities.batch.exceptions import BatchProcessingError, SQSBatchProcessingError from aws_lambda_powertools.utilities.data_classes.dynamo_db_stream_event import DynamoDBRecord from aws_lambda_powertools.utilities.data_classes.kinesis_stream_event import KinesisStreamRecord from aws_lambda_powertools.utilities.data_classes.sqs_event import SQSRecord from aws_lambda_powertools.utilities.parser import BaseModel, validator from aws_lambda_powertools.utilities.parser.models import DynamoDBStreamChangedRecordModel, DynamoDBStreamRecordModel from aws_lambda_powertools.utilities.parser.models import KinesisDataStreamRecord as KinesisDataStreamRecordModel from aws_lambda_powertools.utilities.parser.models import KinesisDataStreamRecordPayload, SqsRecordModel from aws_lambda_powertools.utilities.parser.types import Literal from tests.functional.utils import b64_to_str, str_to_b64 @pytest.fixture(scope="module") def sqs_event_factory() -> Callable: def factory(body: str): return { "messageId": "059f36b4-87a3-44ab-83d2-661975830a7d", "receiptHandle": "AQEBwJnKyrHigUMZj6rYigCgxlaS3SLy0a", "body": body, "attributes": { "ApproximateReceiveCount": "1", "SentTimestamp": "1545082649183", "SenderId": "AIDAIENQZJOLO23YVJ4VO", "ApproximateFirstReceiveTimestamp": "1545082649185", }, "messageAttributes": {}, "md5OfBody": "e4e68fb7bd0e697a0ae8f1bb342846b3", "eventSource": "aws:sqs", "eventSourceARN": "arn:aws:sqs:us-east-2:123456789012:my-queue", "awsRegion": "us-east-1", } return factory @pytest.fixture(scope="module") def kinesis_event_factory() -> Callable: def factory(body: str): seq = "".join(str(randint(0, 9)) for _ in range(52)) return { "kinesis": { "kinesisSchemaVersion": "1.0", "partitionKey": "1", "sequenceNumber": seq, "data": str_to_b64(body), "approximateArrivalTimestamp": 1545084650.987, }, "eventSource": "aws:kinesis", "eventVersion": "1.0", "eventID": f"shardId-000000000006:{seq}", "eventName": "aws:kinesis:record", "invokeIdentityArn": "arn:aws:iam::123456789012:role/lambda-role", "awsRegion": "us-east-2", "eventSourceARN": "arn:aws:kinesis:us-east-2:123456789012:stream/lambda-stream", } return factory @pytest.fixture(scope="module") def dynamodb_event_factory() -> Callable: def factory(body: str): seq = "".join(str(randint(0, 9)) for _ in range(10)) return { "eventID": "1", "eventVersion": "1.0", "dynamodb": { "Keys": {"Id": {"N": "101"}}, "NewImage": {"Message": {"S": body}}, "StreamViewType": "NEW_AND_OLD_IMAGES", "SequenceNumber": seq, "SizeBytes": 26, }, "awsRegion": "us-west-2", "eventName": "INSERT", "eventSourceARN": "eventsource_arn", "eventSource": "aws:dynamodb", } return factory @pytest.fixture(scope="module") def record_handler() -> Callable: def handler(record): body = record["body"] if "fail" in body: raise Exception("Failed to process record.") return body return handler @pytest.fixture(scope="module") def kinesis_record_handler() -> Callable: def handler(record: KinesisStreamRecord): body = b64_to_str(record.kinesis.data) if "fail" in body: raise Exception("Failed to process record.") return body return handler @pytest.fixture(scope="module") def dynamodb_record_handler() -> Callable: def handler(record: DynamoDBRecord): 
body = record.dynamodb.new_image.get("Message").get_value if "fail" in body: raise Exception("Failed to process record.") return body return handler @pytest.fixture(scope="module") def config() -> Config: return Config(region_name="us-east-1") @pytest.fixture(scope="function") def partial_processor(config) -> PartialSQSProcessor: return PartialSQSProcessor(config=config) @pytest.fixture(scope="function") def partial_processor_suppressed(config) -> PartialSQSProcessor: return PartialSQSProcessor(config=config, suppress_exception=True) @pytest.fixture(scope="function") def stubbed_partial_processor(config) -> PartialSQSProcessor: processor = PartialSQSProcessor(config=config) with Stubber(processor.client) as stubber: yield stubber, processor @pytest.fixture(scope="function") def stubbed_partial_processor_suppressed(config) -> PartialSQSProcessor: processor = PartialSQSProcessor(config=config, suppress_exception=True) with Stubber(processor.client) as stubber: yield stubber, processor @pytest.fixture(scope="module") def order_event_factory() -> Callable: def factory(item: Dict) -> str: return json.dumps({"item": item}) return factory def test_partial_sqs_processor_context_with_failure(sqs_event_factory, record_handler, partial_processor): """ Test processor with one failing record """ fail_record = sqs_event_factory("fail") success_record = sqs_event_factory("success") records = [fail_record, success_record] response = {"Successful": [{"Id": fail_record["messageId"]}], "Failed": []} with Stubber(partial_processor.client) as stubber: stubber.add_response("delete_message_batch", response) with pytest.raises(SQSBatchProcessingError) as error: with partial_processor(records, record_handler) as ctx: ctx.process() assert len(error.value.child_exceptions) == 1 stubber.assert_no_pending_responses() def test_partial_sqs_processor_context_only_success(sqs_event_factory, record_handler, partial_processor): """ Test processor without failure """ first_record = sqs_event_factory("success") second_record = sqs_event_factory("success") records = [first_record, second_record] with partial_processor(records, record_handler) as ctx: result = ctx.process() assert result == [ ("success", first_record["body"], first_record), ("success", second_record["body"], second_record), ] def test_partial_sqs_processor_context_multiple_calls(sqs_event_factory, record_handler, partial_processor): """ Test processor without failure """ first_record = sqs_event_factory("success") second_record = sqs_event_factory("success") records = [first_record, second_record] with partial_processor(records, record_handler) as ctx: ctx.process() with partial_processor([first_record], record_handler) as ctx: ctx.process() assert partial_processor.success_messages == [first_record] def test_batch_processor_middleware_with_partial_sqs_processor(sqs_event_factory, record_handler, partial_processor): """ Test middleware's integration with PartialSQSProcessor """ @batch_processor(record_handler=record_handler, processor=partial_processor) def lambda_handler(event, context): return True fail_record = sqs_event_factory("fail") event = {"Records": [sqs_event_factory("fail"), sqs_event_factory("fail"), sqs_event_factory("success")]} response = {"Successful": [{"Id": fail_record["messageId"]}], "Failed": []} with Stubber(partial_processor.client) as stubber: stubber.add_response("delete_message_batch", response) with pytest.raises(SQSBatchProcessingError) as error: lambda_handler(event, {}) assert len(error.value.child_exceptions) == 2 
stubber.assert_no_pending_responses() @patch("aws_lambda_powertools.utilities.batch.sqs.PartialSQSProcessor") def test_sqs_batch_processor_middleware( patched_sqs_processor, sqs_event_factory, record_handler, stubbed_partial_processor ): """ Test middleware's integration with PartialSQSProcessor """ @sqs_batch_processor(record_handler=record_handler) def lambda_handler(event, context): return True stubber, processor = stubbed_partial_processor patched_sqs_processor.return_value = processor fail_record = sqs_event_factory("fail") event = {"Records": [sqs_event_factory("fail"), sqs_event_factory("success")]} response = {"Successful": [{"Id": fail_record["messageId"]}], "Failed": []} stubber.add_response("delete_message_batch", response) with pytest.raises(SQSBatchProcessingError) as error: lambda_handler(event, {}) assert len(error.value.child_exceptions) == 1 stubber.assert_no_pending_responses() def test_batch_processor_middleware_with_custom_processor(capsys, sqs_event_factory, record_handler, config): """ Test middlewares' integration with custom batch processor """ class CustomProcessor(PartialSQSProcessor): def failure_handler(self, record, exception): print("Oh no ! It's a failure.") return super().failure_handler(record, exception) processor = CustomProcessor(config=config) @batch_processor(record_handler=record_handler, processor=processor) def lambda_handler(event, context): return True fail_record = sqs_event_factory("fail") event = {"Records": [sqs_event_factory("fail"), sqs_event_factory("success")]} response = {"Successful": [{"Id": fail_record["messageId"]}], "Failed": []} with Stubber(processor.client) as stubber: stubber.add_response("delete_message_batch", response) with pytest.raises(SQSBatchProcessingError) as error: lambda_handler(event, {}) stubber.assert_no_pending_responses() assert len(error.value.child_exceptions) == 1 assert capsys.readouterr().out == "Oh no ! 
It's a failure.\n" def test_batch_processor_middleware_suppressed_exceptions( sqs_event_factory, record_handler, partial_processor_suppressed ): """ Test middleware's integration with PartialSQSProcessor """ @batch_processor(record_handler=record_handler, processor=partial_processor_suppressed) def lambda_handler(event, context): return True fail_record = sqs_event_factory("fail") event = {"Records": [sqs_event_factory("fail"), sqs_event_factory("fail"), sqs_event_factory("success")]} response = {"Successful": [{"Id": fail_record["messageId"]}], "Failed": []} with Stubber(partial_processor_suppressed.client) as stubber: stubber.add_response("delete_message_batch", response) result = lambda_handler(event, {}) stubber.assert_no_pending_responses() assert result is True def test_partial_sqs_processor_suppressed_exceptions(sqs_event_factory, record_handler, partial_processor_suppressed): """ Test processor without failure """ first_record = sqs_event_factory("success") second_record = sqs_event_factory("fail") records = [first_record, second_record] fail_record = sqs_event_factory("fail") response = {"Successful": [{"Id": fail_record["messageId"]}], "Failed": []} with Stubber(partial_processor_suppressed.client) as stubber: stubber.add_response("delete_message_batch", response) with partial_processor_suppressed(records, record_handler) as ctx: ctx.process() assert partial_processor_suppressed.success_messages == [first_record] @patch("aws_lambda_powertools.utilities.batch.sqs.PartialSQSProcessor") def test_sqs_batch_processor_middleware_suppressed_exception( patched_sqs_processor, sqs_event_factory, record_handler, stubbed_partial_processor_suppressed ): """ Test middleware's integration with PartialSQSProcessor """ @sqs_batch_processor(record_handler=record_handler) def lambda_handler(event, context): return True stubber, processor = stubbed_partial_processor_suppressed patched_sqs_processor.return_value = processor fail_record = sqs_event_factory("fail") event = {"Records": [sqs_event_factory("fail"), sqs_event_factory("success")]} response = {"Successful": [{"Id": fail_record["messageId"]}], "Failed": []} stubber.add_response("delete_message_batch", response) result = lambda_handler(event, {}) stubber.assert_no_pending_responses() assert result is True def test_partial_sqs_processor_context_only_failure(sqs_event_factory, record_handler, partial_processor): """ Test processor with only failures """ first_record = sqs_event_factory("fail") second_record = sqs_event_factory("fail") records = [first_record, second_record] with pytest.raises(SQSBatchProcessingError) as error: with partial_processor(records, record_handler) as ctx: ctx.process() assert len(error.value.child_exceptions) == 2 def test_batch_processor_middleware_success_only(sqs_event_factory, record_handler): # GIVEN first_record = SQSRecord(sqs_event_factory("success")) second_record = SQSRecord(sqs_event_factory("success")) event = {"Records": [first_record.raw_event, second_record.raw_event]} processor = BatchProcessor(event_type=EventType.SQS) @batch_processor(record_handler=record_handler, processor=processor) def lambda_handler(event, context): return processor.response() # WHEN result = lambda_handler(event, {}) # THEN assert result["batchItemFailures"] == [] def test_batch_processor_middleware_with_failure(sqs_event_factory, record_handler): # GIVEN first_record = SQSRecord(sqs_event_factory("fail")) second_record = SQSRecord(sqs_event_factory("success")) event = {"Records": [first_record.raw_event, 
second_record.raw_event]} processor = BatchProcessor(event_type=EventType.SQS) @batch_processor(record_handler=record_handler, processor=processor) def lambda_handler(event, context): return processor.response() # WHEN result = lambda_handler(event, {}) # THEN assert len(result["batchItemFailures"]) == 1 def test_batch_processor_context_success_only(sqs_event_factory, record_handler): # GIVEN first_record = SQSRecord(sqs_event_factory("success")) second_record = SQSRecord(sqs_event_factory("success")) records = [first_record.raw_event, second_record.raw_event] processor = BatchProcessor(event_type=EventType.SQS) # WHEN with processor(records, record_handler) as batch: processed_messages = batch.process() # THEN assert processed_messages == [ ("success", first_record.body, first_record.raw_event), ("success", second_record.body, second_record.raw_event), ] assert batch.response() == {"batchItemFailures": []} def test_batch_processor_context_with_failure(sqs_event_factory, record_handler): # GIVEN first_record = SQSRecord(sqs_event_factory("failure")) second_record = SQSRecord(sqs_event_factory("success")) records = [first_record.raw_event, second_record.raw_event] processor = BatchProcessor(event_type=EventType.SQS) # WHEN with processor(records, record_handler) as batch: processed_messages = batch.process() # THEN assert processed_messages[1] == ("success", second_record.body, second_record.raw_event) assert len(batch.fail_messages) == 1 assert batch.response() == {"batchItemFailures": [{"itemIdentifier": first_record.message_id}]} def test_batch_processor_kinesis_context_success_only(kinesis_event_factory, kinesis_record_handler): # GIVEN first_record = KinesisStreamRecord(kinesis_event_factory("success")) second_record = KinesisStreamRecord(kinesis_event_factory("success")) records = [first_record.raw_event, second_record.raw_event] processor = BatchProcessor(event_type=EventType.KinesisDataStreams) # WHEN with processor(records, kinesis_record_handler) as batch: processed_messages = batch.process() # THEN assert processed_messages == [ ("success", b64_to_str(first_record.kinesis.data), first_record.raw_event), ("success", b64_to_str(second_record.kinesis.data), second_record.raw_event), ] assert batch.response() == {"batchItemFailures": []} def test_batch_processor_kinesis_context_with_failure(kinesis_event_factory, kinesis_record_handler): # GIVEN first_record = KinesisStreamRecord(kinesis_event_factory("failure")) second_record = KinesisStreamRecord(kinesis_event_factory("success")) records = [first_record.raw_event, second_record.raw_event] processor = BatchProcessor(event_type=EventType.KinesisDataStreams) # WHEN with processor(records, kinesis_record_handler) as batch: processed_messages = batch.process() # THEN assert processed_messages[1] == ("success", b64_to_str(second_record.kinesis.data), second_record.raw_event) assert len(batch.fail_messages) == 1 assert batch.response() == {"batchItemFailures": [{"itemIdentifier": first_record.kinesis.sequence_number}]} def test_batch_processor_kinesis_middleware_with_failure(kinesis_event_factory, kinesis_record_handler): # GIVEN first_record = KinesisStreamRecord(kinesis_event_factory("failure")) second_record = KinesisStreamRecord(kinesis_event_factory("success")) event = {"Records": [first_record.raw_event, second_record.raw_event]} processor = BatchProcessor(event_type=EventType.KinesisDataStreams) @batch_processor(record_handler=kinesis_record_handler, processor=processor) def lambda_handler(event, context): return 
processor.response() # WHEN result = lambda_handler(event, {}) # THEN assert len(result["batchItemFailures"]) == 1 def test_batch_processor_dynamodb_context_success_only(dynamodb_event_factory, dynamodb_record_handler): # GIVEN first_record = dynamodb_event_factory("success") second_record = dynamodb_event_factory("success") records = [first_record, second_record] processor = BatchProcessor(event_type=EventType.DynamoDBStreams) # WHEN with processor(records, dynamodb_record_handler) as batch: processed_messages = batch.process() # THEN assert processed_messages == [ ("success", first_record["dynamodb"]["NewImage"]["Message"]["S"], first_record), ("success", second_record["dynamodb"]["NewImage"]["Message"]["S"], second_record), ] assert batch.response() == {"batchItemFailures": []} def test_batch_processor_dynamodb_context_with_failure(dynamodb_event_factory, dynamodb_record_handler): # GIVEN first_record = dynamodb_event_factory("failure") second_record = dynamodb_event_factory("success") records = [first_record, second_record] processor = BatchProcessor(event_type=EventType.DynamoDBStreams) # WHEN with processor(records, dynamodb_record_handler) as batch: processed_messages = batch.process() # THEN assert processed_messages[1] == ("success", second_record["dynamodb"]["NewImage"]["Message"]["S"], second_record) assert len(batch.fail_messages) == 1 assert batch.response() == {"batchItemFailures": [{"itemIdentifier": first_record["dynamodb"]["SequenceNumber"]}]} def test_batch_processor_dynamodb_middleware_with_failure(dynamodb_event_factory, dynamodb_record_handler): # GIVEN first_record = dynamodb_event_factory("failure") second_record = dynamodb_event_factory("success") event = {"Records": [first_record, second_record]} processor = BatchProcessor(event_type=EventType.DynamoDBStreams) @batch_processor(record_handler=dynamodb_record_handler, processor=processor) def lambda_handler(event, context): return processor.response() # WHEN result = lambda_handler(event, {}) # THEN assert len(result["batchItemFailures"]) == 1 def test_batch_processor_context_model(sqs_event_factory, order_event_factory): # GIVEN class Order(BaseModel): item: dict class OrderSqs(SqsRecordModel): body: Order # auto transform json string # so Pydantic can auto-initialize nested Order model @validator("body", pre=True) def transform_body_to_dict(cls, value: str): return json.loads(value) def record_handler(record: OrderSqs): return record.body.item order_event = order_event_factory({"type": "success"}) first_record = sqs_event_factory(order_event) second_record = sqs_event_factory(order_event) records = [first_record, second_record] # WHEN processor = BatchProcessor(event_type=EventType.SQS, model=OrderSqs) with processor(records, record_handler)
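Every test above exercises the same handler shape: build a BatchProcessor for the event source, wrap the Lambda entry point with @batch_processor, and return processor.response() so only the failed records are reported back. A minimal sketch of that shape; the JSON payload handling inside record_handler is illustrative.

import json

from aws_lambda_powertools.utilities.batch import BatchProcessor, EventType, batch_processor
from aws_lambda_powertools.utilities.data_classes.sqs_event import SQSRecord

processor = BatchProcessor(event_type=EventType.SQS)


def record_handler(record: SQSRecord):
    # Illustrative body: raising marks this record as failed, as in the tests above.
    payload = json.loads(record.body)
    if payload.get("fail"):
        raise ValueError("failed to process record")
    return payload


@batch_processor(record_handler=record_handler, processor=processor)
def lambda_handler(event, context):
    # batchItemFailures lists only the records whose handler raised,
    # so the queue redelivers just those messages.
    return processor.response()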
computations for groups of features. """ # Compute contributions for groups of features self.contributions_groups = self.state.compute_grouped_contributions(self.contributions, features_groups) self.features_imp_groups = None # Update features dict with groups names self._update_features_dict_with_groups(features_groups=features_groups) # Compute t-sne projections for groups of features self.x_pred_groups = create_grouped_features_values(x_pred=self.x_pred, x_init=self.x_init, preprocessing=self.preprocessing, features_groups=self.features_groups, features_dict=self.features_dict, how='dict_of_values') # Compute data attribute for groups of features self.data_groups = self.state.assign_contributions( self.state.rank_contributions( self.contributions_groups, self.x_pred_groups ) ) def add(self, y_pred=None, label_dict=None, features_dict=None, title_story: str = None): """ add method allows the user to add a label_dict, features_dict or y_pred without compiling again (and it can last a few moments). y_pred can be used in the plot to color scatter. y_pred is needed in the to_pandas method. label_dict and features_dict displays allow to display clearer results. Parameters ---------- y_pred : pandas.Series, optional (default: None) Prediction values (1 column only). The index must be identical to the index of x_pred. label_dict: dict, optional (default: None) Dictionary mapping integer labels to domain names. features_dict: dict, optional (default: None) Dictionary mapping technical feature names to domain names. title_story: str (default: None) The default title is empty. You can specify a custom title which can be used the webapp, or other methods """ if y_pred is not None: self.y_pred = self.check_y_pred(y_pred) if label_dict is not None: if isinstance(label_dict, dict) is False: raise ValueError( """ label_dict must be a dict """ ) self.label_dict = label_dict self.check_label_dict() self.inv_label_dict = {v: k for k, v in self.label_dict.items()} if features_dict is not None: if isinstance(features_dict, dict) is False: raise ValueError( """ features_dict must be a dict """ ) self.features_dict = features_dict self.check_features_dict() self.inv_features_dict = {v: k for k, v in self.features_dict.items()} if title_story is not None: self.title_story = title_story def choose_state(self, contributions): """ Select implementation of the smart explainer. Typically check if it is a multi-class problem, in which case the implementation should be adapted to lists of contributions. Parameters ---------- contributions : object Local contributions. Could also be a list of local contributions. Returns ------- object SmartState or SmartMultiState, depending on the nature of the input. """ if isinstance(contributions, list): return MultiDecorator(SmartState()) else: return SmartState() def adapt_contributions(self, contributions): """ If _case is "classification" and contributions a np.array or pd.DataFrame this function transform contributions matrix in a list of 2 contributions matrices: Opposite contributions and contributions matrices. 
Parameters ---------- contributions : pandas.DataFrame, np.ndarray or list Returns ------- pandas.DataFrame, np.ndarray or list contributions object modified """ return adapt_contributions(self._case, contributions) def validate_contributions(self, contributions): """ Check len of list if _case is "classification" Check contributions object type if _case is "regression" Check type of contributions and transform into (list of) pd.Dataframe if necessary Parameters ---------- contributions : pandas.DataFrame, np.ndarray or list Returns ------- pandas.DataFrame or list """ check_contribution_object(self._case, self._classes, contributions) return self.state.validate_contributions(contributions, self.x_init) def get_interaction_values(self, n_samples_max=None, selection=None): """ Compute shap interaction values for each row of x_init. This function is only available for explainer of type TreeExplainer (used for tree based models). Please refer to the official tree shap paper for more information : https://arxiv.org/pdf/1802.03888.pdf Parameters ---------- n_samples_max : int, optional Limit the number of points for which we compute the interactions. selection : list, optional Contains list of index, subset of the input DataFrame that we want to plot Returns ------- np.ndarray Shap interaction values for each sample as an array of shape (# samples x # features x # features). """ x = copy.deepcopy(self.x_init) if selection: x = x.loc[selection] if hasattr(self, 'x_interaction'): if self.x_interaction.equals(x[:n_samples_max]): return self.interaction_values self.x_interaction = x[:n_samples_max] self.interaction_values = get_shap_interaction_values(self.x_interaction, self.explainer) return self.interaction_values def apply_preprocessing(self, contributions, preprocessing=None): """ Reconstruct contributions for original features, taken into account a preprocessing. Parameters ---------- contributions : object Local contributions, or list of local contributions. preprocessing : object Encoder taken from scikit-learn or category_encoders Returns ------- object Reconstructed local contributions in the original space. Can be a list. """ if preprocessing: return self.state.inverse_transform_contributions( contributions, preprocessing ) else: return contributions def check_postprocessing_modif_strings(self, postprocessing=None): """ Check if any modification of postprocessing will convert numeric values into strings values. If so, return True, otherwise False. Parameters ---------- postprocessing: dict Dict of postprocessing modifications to apply. Returns ------- modif: bool Boolean which is True if any numerical variable will be converted into string. """ modif = False if postprocessing is not None: for key in postprocessing.keys(): dict_postprocess = postprocessing[key] if dict_postprocess['type'] in {'prefix', 'suffix'} \ and pd.api.types.is_numeric_dtype(self.x_pred[key]): modif = True return modif def modify_postprocessing(self, postprocessing=None): """ Modifies postprocessing parameter, to change only keys, with features name, in case of parameters are not real feature names (with columns_dict, or inv_features_dict). Parameters ---------- postprocessing : Dict Dictionnary of postprocessing to modify. Returns ------- Dict Modified dictionnary, with same values but keys directly referencing to feature names. 
""" if postprocessing: new_dic = dict() for key in postprocessing.keys(): if key in self.features_dict: new_dic[key] = postprocessing[key] elif key in self.columns_dict.keys(): new_dic[self.columns_dict[key]] = postprocessing[key] elif key in self.inv_features_dict: new_dic[self.inv_features_dict[key]] = postprocessing[key] else: raise ValueError(f"Feature name '{key}' not found in the dataset.") return new_dic def check_postprocessing(self, postprocessing): """ Check that postprocessing parameter has good attributes. Check if postprocessing is a dictionnary, and if its parameters are good. Parameters ---------- postprocessing : dict Dictionnary of postprocessing that need to be checked. """ check_postprocessing(self.x_pred, postprocessing) def apply_postprocessing(self, postprocessing=None): """ Modifies x_pred Dataframe according to postprocessing modifications, if exists. Parameters ---------- postprocessing: Dict Dictionnary of postprocessing modifications to apply in x_pred. Returns ------- pandas.Dataframe Returns x_pred if postprocessing is empty, modified dataframe otherwise. """ if postprocessing: return apply_postprocessing(self.x_pred, postprocessing) else: return self.x_pred def check_y_pred(self, ypred=None): """ Check if y_pred is a one column dataframe of integer or float and if y_pred index matches x_pred index Parameters ---------- ypred: pandas.DataFrame (optional) User-specified prediction values. """ return check_ypred(self.x_pred, ypred) def check_model(self): """ Check if model has a predict_proba method is a one column dataframe of integer or float and if y_pred index matches x_pred index Returns ------- string: 'regression' or 'classification' according to the attributes of the model """ _case, _classes = check_model(self.model) return _case, _classes def check_label_dict(self): """ Check if label_dict and model _classes match """ if self._case != "regression": return check_label_dict(self.label_dict, self._case, self._classes) def check_features_dict(self): """ Check the features_dict and add the necessary keys if all the input X columns are not present """ for feature in (set(list(self.columns_dict.values())) - set(list(self.features_dict))): self.features_dict[feature] = feature def _update_features_dict_with_groups(self, features_groups): """ Add groups into features dict and inv_features_dict if not present. """ for group_name in features_groups.keys(): self.features_desc[group_name] = 1000 if group_name not in self.features_dict.keys(): self.features_dict[group_name] = group_name self.inv_features_dict[group_name] = group_name def check_contributions(self): """ Check if contributions and prediction set match in terms of shape and index. """ if not self.state.check_contributions(self.contributions, self.x_pred): raise ValueError( """ Prediction set and contributions should have exactly the same number of lines and number of columns. the order of the columns must be the same Please check x, contributions and preprocessing arguments. """ ) def check_label_name(self, label, origin=None): """ Convert a string label in integer. If the label is already an integer nothing is done. In all other cases an error is raised. 
Parameters ---------- label: int or string Integer (id) or string (business names) origin: None, 'num', 'code', 'value' (default: None) Kind of the label used in parameter Returns ------- tuple label num, label code (class of the mode), label value """ if origin is None: if label in self._classes: origin = 'code' elif self.label_dict is not None and label in self.label_dict.values(): origin = 'value' elif isinstance(label, int) and label in range(-1, len(self._classes)): origin = 'num' try: if origin == 'num': label_num = label label_code = self._classes[label] label_value = self.label_dict[label_code] if self.label_dict else label_code elif origin == 'code': label_code = label label_num = self._classes.index(label) label_value = self.label_dict[label_code] if self.label_dict else label_code elif origin == 'value': label_code = self.inv_label_dict[label] label_num = self._classes.index(label_code) label_value = label else: raise ValueError except ValueError: raise Exception({"message": "Origin must be 'num', 'code' or 'value'."}) except Exception: raise Exception({"message": f"Label
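A hedged sketch of how the add() and check_label_name() methods above fit together once a SmartExplainer has been compiled; the explainer instance, column names, and class labels are all illustrative assumptions rather than part of the code above.

import pandas as pd


def annotate_explainer(xpl, predictions: pd.Series):
    """Attach display metadata to an already-compiled shapash SmartExplainer.

    `xpl`, the feature names, and the label names are illustrative assumptions.
    """
    xpl.add(
        y_pred=predictions,                      # one column, indexed like x_pred
        label_dict={0: "No churn", 1: "Churn"},  # class id -> display name
        features_dict={"age": "Customer age"},   # column name -> display name
        title_story="Churn model explanation",
    )
    # Resolve a label given as a display value back to (num, code, value).
    return xpl.check_label_name("Churn", origin="value")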
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from enum import Enum, EnumMeta from six import with_metaclass class _CaseInsensitiveEnumMeta(EnumMeta): def __getitem__(self, name): return super().__getitem__(name.upper()) def __getattr__(cls, name): """Return the enum member matching `name` We use __getattr__ instead of descriptors or inserting into the enum class' __dict__ in order to support `name` and `value` being both properties for enum members (which live in the class' __dict__) and enum members themselves. """ try: return cls._member_map_[name.upper()] except KeyError: raise AttributeError(name) class Enum10(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ID = "id" SELF = "self" CREATED_DATE_TIME = "createdDateTime" CREATED_BY = "createdBy" DISPLAY_NAME = "displayName" LAST_MODIFIED_BY = "lastModifiedBy" LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime" SECTION_GROUPS_URL = "sectionGroupsUrl" SECTIONS_URL = "sectionsUrl" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION_GROUP = "parentSectionGroup" SECTION_GROUPS = "sectionGroups" SECTIONS = "sections" class Enum100(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ID = "id" SELF = "self" CREATED_DATE_TIME = "createdDateTime" CREATED_BY = "createdBy" DISPLAY_NAME = "displayName" LAST_MODIFIED_BY = "lastModifiedBy" LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime" IS_DEFAULT = "isDefault" LINKS = "links" PAGES_URL = "pagesUrl" PAGES = "pages" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION_GROUP = "parentSectionGroup" class Enum1000(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ID = "id" SELF = "self" CREATED_DATE_TIME = "createdDateTime" CREATED_BY = "createdBy" DISPLAY_NAME = "displayName" LAST_MODIFIED_BY = "lastModifiedBy" LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime" IS_DEFAULT = "isDefault" LINKS = "links" PAGES_URL = "pagesUrl" PAGES = "pages" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION_GROUP = "parentSectionGroup" class Enum1001(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ASTERISK = "*" PAGES = "pages" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION_GROUP = "parentSectionGroup" class Enum1002(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ID = "id" SELF = "self" CREATED_DATE_TIME = "createdDateTime" CREATED_BY = "createdBy" DISPLAY_NAME = "displayName" LAST_MODIFIED_BY = "lastModifiedBy" LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime" SECTION_GROUPS_URL = "sectionGroupsUrl" SECTIONS_URL = "sectionsUrl" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION_GROUP = "parentSectionGroup" SECTION_GROUPS = "sectionGroups" SECTIONS = "sections" class Enum1003(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ASTERISK = "*" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION_GROUP = "parentSectionGroup" SECTION_GROUPS = "sectionGroups" SECTIONS = "sections" class Enum1004(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ID = "id" ID_DESC = "id desc" SELF = "self" SELF_DESC = "self desc" CREATED_DATE_TIME = "createdDateTime" CREATED_DATE_TIME_DESC = "createdDateTime desc" CREATED_BY = "createdBy" CREATED_BY_DESC = "createdBy desc" DISPLAY_NAME = "displayName" 
DISPLAY_NAME_DESC = "displayName desc" LAST_MODIFIED_BY = "lastModifiedBy" LAST_MODIFIED_BY_DESC = "lastModifiedBy desc" LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime" LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc" IS_DEFAULT = "isDefault" IS_DEFAULT_DESC = "isDefault desc" LINKS = "links" LINKS_DESC = "links desc" PAGES_URL = "pagesUrl" PAGES_URL_DESC = "pagesUrl desc" class Enum1005(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ID = "id" SELF = "self" CREATED_DATE_TIME = "createdDateTime" CREATED_BY = "createdBy" DISPLAY_NAME = "displayName" LAST_MODIFIED_BY = "lastModifiedBy" LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime" IS_DEFAULT = "isDefault" LINKS = "links" PAGES_URL = "pagesUrl" PAGES = "pages" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION_GROUP = "parentSectionGroup" class Enum1006(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ASTERISK = "*" PAGES = "pages" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION_GROUP = "parentSectionGroup" class Enum1007(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ID = "id" SELF = "self" CREATED_DATE_TIME = "createdDateTime" CREATED_BY = "createdBy" DISPLAY_NAME = "displayName" LAST_MODIFIED_BY = "lastModifiedBy" LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime" IS_DEFAULT = "isDefault" LINKS = "links" PAGES_URL = "pagesUrl" PAGES = "pages" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION_GROUP = "parentSectionGroup" class Enum1008(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ASTERISK = "*" PAGES = "pages" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION_GROUP = "parentSectionGroup" class Enum1009(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ID = "id" ID_DESC = "id desc" SELF = "self" SELF_DESC = "self desc" CREATED_DATE_TIME = "createdDateTime" CREATED_DATE_TIME_DESC = "createdDateTime desc" CONTENT = "content" CONTENT_DESC = "content desc" CONTENT_URL = "contentUrl" CONTENT_URL_DESC = "contentUrl desc" CREATED_BY_APP_ID = "createdByAppId" CREATED_BY_APP_ID_DESC = "createdByAppId desc" LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime" LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc" LEVEL = "level" LEVEL_DESC = "level desc" LINKS = "links" LINKS_DESC = "links desc" ORDER = "order" ORDER_DESC = "order desc" TITLE = "title" TITLE_DESC = "title desc" USER_TAGS = "userTags" USER_TAGS_DESC = "userTags desc" class Enum101(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ASTERISK = "*" PAGES = "pages" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION_GROUP = "parentSectionGroup" class Enum1010(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ID = "id" SELF = "self" CREATED_DATE_TIME = "createdDateTime" CONTENT = "content" CONTENT_URL = "contentUrl" CREATED_BY_APP_ID = "createdByAppId" LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime" LEVEL = "level" LINKS = "links" ORDER = "order" TITLE = "title" USER_TAGS = "userTags" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION = "parentSection" class Enum1011(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ASTERISK = "*" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION = "parentSection" class Enum1012(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ID = "id" SELF = "self" CREATED_DATE_TIME = "createdDateTime" CONTENT = "content" CONTENT_URL = "contentUrl" CREATED_BY_APP_ID = "createdByAppId" LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime" LEVEL = "level" LINKS = "links" ORDER = "order" TITLE = "title" USER_TAGS = "userTags" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION = "parentSection" class 


class Enum1013(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    ASTERISK = "*"
    PARENT_NOTEBOOK = "parentNotebook"
    PARENT_SECTION = "parentSection"


class Enum1014(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    ID = "id"
    SELF = "self"
    CREATED_DATE_TIME = "createdDateTime"
    CREATED_BY = "createdBy"
    DISPLAY_NAME = "displayName"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    IS_DEFAULT = "isDefault"
    IS_SHARED = "isShared"
    LINKS = "links"
    SECTION_GROUPS_URL = "sectionGroupsUrl"
    SECTIONS_URL = "sectionsUrl"
    USER_ROLE = "userRole"
    SECTION_GROUPS = "sectionGroups"
    SECTIONS = "sections"


class Enum1015(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    ASTERISK = "*"
    SECTION_GROUPS = "sectionGroups"
    SECTIONS = "sections"


class Enum1016(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    ID = "id"
    ID_DESC = "id desc"
    SELF = "self"
    SELF_DESC = "self desc"
    CREATED_DATE_TIME = "createdDateTime"
    CREATED_DATE_TIME_DESC = "createdDateTime desc"
    CREATED_BY = "createdBy"
    CREATED_BY_DESC = "createdBy desc"
    DISPLAY_NAME = "displayName"
    DISPLAY_NAME_DESC = "displayName desc"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_BY_DESC = "lastModifiedBy desc"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
    SECTION_GROUPS_URL = "sectionGroupsUrl"
    SECTION_GROUPS_URL_DESC = "sectionGroupsUrl desc"
    SECTIONS_URL = "sectionsUrl"
    SECTIONS_URL_DESC = "sectionsUrl desc"


class Enum1017(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    ID = "id"
    SELF = "self"
    CREATED_DATE_TIME = "createdDateTime"
    CREATED_BY = "createdBy"
    DISPLAY_NAME = "displayName"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    SECTION_GROUPS_URL = "sectionGroupsUrl"
    SECTIONS_URL = "sectionsUrl"
    PARENT_NOTEBOOK = "parentNotebook"
    PARENT_SECTION_GROUP = "parentSectionGroup"
    SECTION_GROUPS = "sectionGroups"
    SECTIONS = "sections"


class Enum1018(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    ASTERISK = "*"
    PARENT_NOTEBOOK = "parentNotebook"
    PARENT_SECTION_GROUP = "parentSectionGroup"
    SECTION_GROUPS = "sectionGroups"
    SECTIONS = "sections"


class Enum1019(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    ID = "id"
    SELF = "self"
    CREATED_DATE_TIME = "createdDateTime"
    CREATED_BY = "createdBy"
    DISPLAY_NAME = "displayName"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    SECTION_GROUPS_URL = "sectionGroupsUrl"
    SECTIONS_URL = "sectionsUrl"
    PARENT_NOTEBOOK = "parentNotebook"
    PARENT_SECTION_GROUP = "parentSectionGroup"
    SECTION_GROUPS = "sectionGroups"
    SECTIONS = "sections"


class Enum102(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    ID = "id"
    ID_DESC = "id desc"
    SELF = "self"
    SELF_DESC = "self desc"
    CREATED_DATE_TIME = "createdDateTime"
    CREATED_DATE_TIME_DESC = "createdDateTime desc"
    CONTENT = "content"
    CONTENT_DESC = "content desc"
    CONTENT_URL = "contentUrl"
    CONTENT_URL_DESC = "contentUrl desc"
    CREATED_BY_APP_ID = "createdByAppId"
    CREATED_BY_APP_ID_DESC = "createdByAppId desc"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
    LEVEL = "level"
    LEVEL_DESC = "level desc"
    LINKS = "links"
    LINKS_DESC = "links desc"
    ORDER = "order"
    ORDER_DESC = "order desc"
    TITLE = "title"
    TITLE_DESC = "title desc"
    USER_TAGS = "userTags"
    USER_TAGS_DESC = "userTags desc"


class Enum1020(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    ASTERISK = "*"
    PARENT_NOTEBOOK = "parentNotebook"
    PARENT_SECTION_GROUP = "parentSectionGroup"
    SECTION_GROUPS = "sectionGroups"
    SECTIONS = "sections"


class Enum1021(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    ID = "id"
    SELF = "self"
    CREATED_DATE_TIME = "createdDateTime"
    CREATED_BY = "createdBy"
    DISPLAY_NAME = "displayName"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    IS_DEFAULT = "isDefault"
    IS_SHARED = "isShared"
    LINKS = "links"
    SECTION_GROUPS_URL = "sectionGroupsUrl"
    SECTIONS_URL = "sectionsUrl"
    USER_ROLE = "userRole"
    SECTION_GROUPS = "sectionGroups"
    SECTIONS = "sections"


class Enum1022(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    ASTERISK = "*"
    SECTION_GROUPS = "sectionGroups"
    SECTIONS = "sections"


class Enum1023(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    ID = "id"
    SELF = "self"
    CREATED_DATE_TIME = "createdDateTime"
    CREATED_BY = "createdBy"
    DISPLAY_NAME = "displayName"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    SECTION_GROUPS_URL = "sectionGroupsUrl"
    SECTIONS_URL = "sectionsUrl"
    PARENT_NOTEBOOK = "parentNotebook"
    PARENT_SECTION_GROUP = "parentSectionGroup"
    SECTION_GROUPS = "sectionGroups"
    SECTIONS = "sections"


class Enum1024(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    ASTERISK = "*"
    PARENT_NOTEBOOK = "parentNotebook"
    PARENT_SECTION_GROUP = "parentSectionGroup"
    SECTION_GROUPS = "sectionGroups"
    SECTIONS = "sections"


class Enum1025(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    ID = "id"
    ID_DESC = "id desc"
    SELF = "self"
    SELF_DESC = "self desc"
    CREATED_DATE_TIME = "createdDateTime"
    CREATED_DATE_TIME_DESC = "createdDateTime desc"
    CREATED_BY = "createdBy"
    CREATED_BY_DESC = "createdBy desc"
    DISPLAY_NAME = "displayName"
    DISPLAY_NAME_DESC = "displayName desc"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_BY_DESC = "lastModifiedBy desc"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
    SECTION_GROUPS_URL = "sectionGroupsUrl"
    SECTION_GROUPS_URL_DESC = "sectionGroupsUrl desc"
    SECTIONS_URL = "sectionsUrl"
    SECTIONS_URL_DESC = "sectionsUrl desc"


class Enum1026(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    ID = "id"
    SELF = "self"
    CREATED_DATE_TIME = "createdDateTime"
    CREATED_BY = "createdBy"
    DISPLAY_NAME = "displayName"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    SECTION_GROUPS_URL = "sectionGroupsUrl"
    SECTIONS_URL = "sectionsUrl"
    PARENT_NOTEBOOK = "parentNotebook"
    PARENT_SECTION_GROUP = "parentSectionGroup"
    SECTION_GROUPS = "sectionGroups"
    SECTIONS = "sections"


class Enum1027(with_metaclass(_CaseInsensitiveEnumMeta, str,
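
# ---------------------------------------------------------------------------
# Illustrative note (not part of the generated module, and the source is
# truncated above at Enum1027): thanks to _CaseInsensitiveEnumMeta, members
# can be resolved by name regardless of case, and because each enum also
# subclasses str it compares equal to its wire value. A minimal sketch,
# assuming the classes defined above are importable:
#
#     >>> Enum10["id"] is Enum10.ID
#     True
#     >>> Enum10.created_by is Enum10.CREATED_BY
#     True
#     >>> Enum10.DISPLAY_NAME == "displayName"
#     True
# ---------------------------------------------------------------------------
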
#!/usr/bin/python3
# Source: Tinka8ell/RCBoat, TestCode/GpioZeroBoat.py
"""
GpioZeroBoat - A Boat using gpioZero pin control

Originally created as an implementation extending the gpiozero robot object.
It added a third motor and a rudder. With the inclusion of the turrets, which
use stepper motors not directly supported by gpiozero, they had to be
implemented in a more specific way.
"""

from gpiozero import (
    SourceMixin, CompositeDevice, Motor, Servo, Pin, Device, GPIOPinMissing
)


def dp2(number):
    # Format a number with two decimal places.
    return format(number, "03.2f")


def checkMotor(name, pins, pwm=True, pin_factory=None):
    # Return a Motor plus its (forward, backward) pins from either a tuple of
    # pin numbers or an existing Motor object.
    # ## print("checkMotor:", name, pins, pwm, pin_factory)
    motor = None
    if isinstance(pins, tuple):
        # ## print("pins are tuple")
        motor = Motor(*pins, pwm=pwm, pin_factory=pin_factory)
    elif isinstance(pins, Motor):
        # ## print("pins are Motor")
        motor = pins
        # steal pins back from device
        pins = (motor.forward_device, motor.backward_device)
    # ## print("motor is", motor)
    if not motor:
        raise GPIOPinMissing(
            name + ' motor pins must be given as tuple or a Motor object')
    pins = pins[0:2]  # just the first two
    return motor, pins


class GPIOZeroBoat(SourceMixin, CompositeDevice):
    """
    Extends :class:`CompositeDevice` to represent a generic tri-motor and
    rudder (servo) boat.

    This class is constructed with three tuples representing the forward and
    backward pins of the left, right and center controllers respectively.

    :param tuple left:
        A tuple of two (or three) GPIO pins representing the forward and
        backward inputs of the left motor's controller. Use three pins if
        your motor controller requires an enable pin.

    :param tuple right:
        A tuple of two (or three) GPIO pins representing the forward and
        backward inputs of the right motor's controller. Use three pins if
        your motor controller requires an enable pin.

    :param tuple center:
        A tuple of two (or three) GPIO pins representing the forward and
        backward inputs of the center motor's controller. Use three pins if
        your motor controller requires an enable pin.

    :param servo rudder:
        A GPIO pin representing the input of the servo controlling the
        rudder.

    :param bool pwm:
        If :data:`True` (the default), construct :class:`PWMOutputDevice`
        instances for the motor controller pins, allowing both direction and
        variable speed control. If :data:`False`, construct
        :class:`DigitalOutputDevice` instances, allowing only direction
        control.

    :type pin_factory: Factory or None
    :param pin_factory:
        See :doc:`api_pins` for more information (this is an advanced feature
        which most users can ignore).

    .. attribute:: left_motor

        The :class:`Motor` on the left of the boat.

    .. attribute:: right_motor

        The :class:`Motor` on the right of the boat.

    .. attribute:: center_motor

        The :class:`Motor` in the center of the boat.

    .. attribute:: rudder

        The :class:`Servo` for the rudder of the boat.
    """

    def __init__(self, left=None, right=None, center=None, rudder=None,
                 pwm=True, pin_factory=None, *args):
        # *args is a hack to ensure a useful message is shown when pins are
        # supplied as sequential positional arguments e.g. 2, 3, 4, 5
        # Check each subdevice and add pins to monitoring ...
        left_motor, pins = checkMotor(
            "left", left, pwm=pwm, pin_factory=pin_factory)
        if left_motor:
            self.pins = list(pins)
        right_motor, pins = checkMotor(
            "right", right, pwm=pwm, pin_factory=pin_factory)
        if right_motor:
            self.pins += list(pins)
        center_motor, pins = checkMotor(
            "center", center, pwm=pwm, pin_factory=pin_factory)
        if center_motor:
            self.pins += list(pins)
        if left_motor and not right_motor:
            raise GPIOPinMissing('Right motor must be given as well as left')
        if not left_motor and right_motor:
            raise GPIOPinMissing('Left motor must be given as well as right')
        if not left_motor and not center_motor:
            raise GPIOPinMissing('At least one motor must be given')
        if rudder:
            if not isinstance(rudder, Servo):
                rudder = Servo(rudder)
        else:
            raise GPIOPinMissing('Must provide a Servo as a rudder')
        self.pins.append(rudder.pwm_device)
        # Normalise everything collected in self.pins to Pin objects.
        for i in range(len(self.pins)):
            if isinstance(self.pins[i], Device):
                self.pins[i] = self.pins[i].pin
            elif not isinstance(self.pins[i], Pin):
                if pin_factory:
                    self.pins[i] = pin_factory.pin(self.pins[i])
                else:
                    self.pins[i] = Device.pin_factory.pin(self.pins[i])
        # initialise parent
        motors = []
        items = {}
        order = []
        if left_motor:  # also must be right motor
            motors.append(left_motor)
            items["left_motor"] = left_motor
            order.append("left_motor")
            motors.append(right_motor)
            items["right_motor"] = right_motor
            order.append("right_motor")
        if center_motor:
            motors.append(center_motor)
            items["center_motor"] = center_motor
            order.append("center_motor")
        self.motors = tuple(motors)
        items["rudder"] = rudder
        order.append("rudder")
        items["pin_factory"] = pin_factory
        super(GPIOZeroBoat, self).__init__(**items)
        '''
        balancing ratios:
            thrustDelta - ratio from rudder setting to thrust modification
            leftDelta - ratio to reduce left hand motor by to balance
            rightDelta - ratio to reduce right hand motor by to balance
            centerDelta - ratio to reduce center motor by to balance
            toServo - ratio to adjust rudder setting to match servo range
        '''
        self.thrustDelta = 1.0
        self.leftDelta = 1.0
        self.rightDelta = 1.0
        self.centerDelta = 1.0
        self.toServo = 1.0
        # initialise the motors and servo
        if self.left_motor:
            self.left_motor.stop()
        if self.right_motor:
            self.right_motor.stop()
        if self.center_motor:
            self.center_motor.stop()
        self.rudder.mid()
        # LimmitedSteppers are assumed to be in default position
        self._debug = False
        return

    @property
    def value(self):
        """
        Represents the motion of the boat as a tuple of (left_motor_speed,
        right_motor_speed, center_motor_speed, rudder_angle) with
        ``(0, 0, 0, 0)`` representing stopped.
        """
        return super(GPIOZeroBoat, self).value

    # what if there is bias? - multiplier for left/right/center so none > 1
    # this should be done on the gpioZeroBoat side of things ...
    @value.setter
    def value(self, value):
        values = tuple(value)
        for motor in self.motors:
            motor.value = values[0]
            values = values[1:]
        self.rudder.value = values[0]
        values = values[1:]
        self.debug("set value:", self.value)
        return

    def navigate(self, x, y):
        """
        Control the boat by setting left/right to x, and forward/backward
        to y. Treat as a joystick setting.

        Take the position of the forward / backward, left / right joystick:
        0.0 < y <= 1.0 - amount of forward thrust
        0.0 > y >= -1.0 - amount of backward thrust
        0.0 < x <= 1.0 - amount of right turn
        0.0 > x >= -1.0 - amount of left turn

        All three motors will give an average of the forward or backward
        throttle, but the left and right motors will be modified by a delta
        based on the amount of requested turn.
        As the thrust of each motor is maxed out at 1.0, the delta has a cut
        off at the point any motor reaches full throttle.

        The ratio between turn and thrust delta is adjustable / definable:
        self.thrustDelta defines this; the default is 1 (1 to 1).

        The ratio of thrust to actual power of the motors is also adjustable /
        definable to balance any natural imperfections: self.leftDelta,
        self.rightDelta (and possibly) self.centerDelta cover this.
        """
        left, right, center = y, y, y  # straight ahead
        rudder = x
        if x < 0:  # turn left
            if y < 0:  # going backward
                cap = 1.0 + y  # max amount we change by
                delta = -min(-x * self.thrustDelta, cap)
            else:  # going forwards
                cap = 1.0 - y  # max amount we change by
                delta = min(-x * self.thrustDelta, cap)
            left -= delta
            right += delta
        else:  # turn right
            if y < 0:  # going backward
                cap = 1.0 + y  # max amount we change by
                delta = -min(x * self.thrustDelta, cap)
            else:  # going forwards
                cap = 1.0 - y  # max amount we change by
                delta = min(x * self.thrustDelta, cap)
            left += delta
            right -= delta
        # print("Setting LRC+:", int(100*left), int(100*right),
        #       int(100*center), int(100*rudder))
        left *= self.leftDelta
        right *= self.rightDelta
        center *= self.centerDelta
        rudder *= self.toServo
        # print("Actual LRC+:", int(100*left), int(100*right),
        #       int(100*center), int(100*rudder))
        if self.left_motor:
            self.left_motor.value = left
        if self.right_motor:
            self.right_motor.value = right
        if self.center_motor:
            self.center_motor.value = center
        self.rudder.value = rudder
        return

    def forward(self, speed=1, **kwargs):
        """
        Drive the boat forward by running all motors forward.

        :param float speed:
            Speed at which to drive the motors, as a value between 0 (stopped)
            and 1 (full speed). The default is 1.

        :param float curve_left:
            The amount to curve left while moving forwards, by driving the
            left motor at a slower speed. Maximum *curve_left* is 1, the
            default is 0 (no curve). This parameter can only be specified as
            a keyword parameter, and is mutually exclusive with *curve_right*.

        :param float curve_right:
            The amount to curve right while moving forwards, by driving the
            right motor at a slower speed. Maximum *curve_right* is 1, the
            default is 0 (no curve). This parameter can only be specified as
            a keyword parameter, and is mutually exclusive with *curve_left*.
        """
        curve_left = kwargs.pop('curve_left', 0)
        curve_right = kwargs.pop('curve_right', 0)
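

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file, which is truncated
# above inside forward()). It drives the navigate() method through gpiozero's
# mock pin factory so it can run without hardware; the BCM pin numbers below
# are arbitrary placeholders, not taken from the project.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from gpiozero.pins.mock import MockFactory, MockPWMPin

    # Route all devices through PWM-capable mock pins for a dry run.
    Device.pin_factory = MockFactory(pin_class=MockPWMPin)

    boat = GPIOZeroBoat(left=(4, 14), right=(17, 18),
                        center=(22, 23), rudder=24)
    boat.navigate(0.0, 1.0)   # full ahead, no turn
    print("full ahead:", boat.value)
    boat.navigate(0.5, 0.5)   # half ahead, curving right
    print("curving right:", boat.value)
    boat.navigate(0.0, 0.0)   # all stop, rudder centred
    print("all stop:", boat.value)
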
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.

from __future__ import absolute_import, division, print_function

import base64
import itertools
import os
import textwrap

import pytest

from cryptography.exceptions import UnsupportedAlgorithm, _Reasons
from cryptography.hazmat.backends.interfaces import (
    DERSerializationBackend, DSABackend, EllipticCurveBackend,
    PEMSerializationBackend, RSABackend
)
from cryptography.hazmat.primitives.asymmetric import dsa, ec, rsa
from cryptography.hazmat.primitives.serialization import (
    BestAvailableEncryption, load_der_private_key, load_der_public_key,
    load_pem_private_key, load_pem_public_key, load_ssh_public_key
)

from .test_ec import _skip_curve_unsupported
from .utils import (
    _check_dsa_private_numbers, _check_rsa_private_numbers,
    load_vectors_from_file
)
from ...utils import raises_unsupported_algorithm


@pytest.mark.requires_backend_interface(interface=DERSerializationBackend)
class TestDERSerialization(object):
    @pytest.mark.requires_backend_interface(interface=RSABackend)
    @pytest.mark.parametrize(
        ("key_path", "password"),
        [
            (["DER_Serialization", "enc-rsa-pkcs8.der"], b"foobar"),
            (["DER_Serialization", "enc2-rsa-pkcs8.der"], b"baz"),
            (["DER_Serialization", "unenc-rsa-pkcs8.der"], None),
            (["DER_Serialization", "testrsa.der"], None),
        ]
    )
    def test_load_der_rsa_private_key(self, key_path, password, backend):
        key = load_vectors_from_file(
            os.path.join("asymmetric", *key_path),
            lambda derfile: load_der_private_key(
                derfile.read(), password, backend
            ),
            mode="rb"
        )
        assert key
        assert isinstance(key, rsa.RSAPrivateKey)
        if isinstance(key, rsa.RSAPrivateKeyWithNumbers):
            _check_rsa_private_numbers(key.private_numbers())

    @pytest.mark.requires_backend_interface(interface=DSABackend)
    @pytest.mark.parametrize(
        ("key_path", "password"),
        [
            (["DER_Serialization", "unenc-dsa-pkcs8.der"], None),
            (["DER_Serialization", "dsa.1024.der"], None),
            (["DER_Serialization", "dsa.2048.der"], None),
            (["DER_Serialization", "dsa.3072.der"], None),
        ]
    )
    def test_load_der_dsa_private_key(self, key_path, password, backend):
        key = load_vectors_from_file(
            os.path.join("asymmetric", *key_path),
            lambda derfile: load_der_private_key(
                derfile.read(), password, backend
            ),
            mode="rb"
        )
        assert key
        assert isinstance(key, dsa.DSAPrivateKey)
        if isinstance(key, dsa.DSAPrivateKeyWithNumbers):
            _check_dsa_private_numbers(key.private_numbers())

    @pytest.mark.parametrize(
        ("key_path", "password"),
        [
            (["DER_Serialization", "ec_private_key.der"], None),
            (["DER_Serialization", "ec_private_key_encrypted.der"],
             b"123456"),
        ]
    )
    @pytest.mark.requires_backend_interface(interface=EllipticCurveBackend)
    def test_load_der_ec_private_key(self, key_path, password, backend):
        _skip_curve_unsupported(backend, ec.SECP256R1())
        key = load_vectors_from_file(
            os.path.join("asymmetric", *key_path),
            lambda derfile: load_der_private_key(
                derfile.read(), password, backend
            ),
            mode="rb"
        )
        assert key
        assert isinstance(key, ec.EllipticCurvePrivateKey)
        assert key.curve.name == "secp256r1"
        assert key.curve.key_size == 256

    @pytest.mark.parametrize(
        "key_path",
        [
            ["DER_Serialization", "enc-rsa-pkcs8.der"],
        ]
    )
    @pytest.mark.requires_backend_interface(interface=RSABackend)
    def test_wrong_password(self, key_path, backend):
        key_file = os.path.join("asymmetric", *key_path)
        password = b"<PASSWORD>"
        with pytest.raises(ValueError):
            load_vectors_from_file(
                key_file,
                lambda derfile: load_der_private_key(
                    derfile.read(), password, backend
                ),
                mode="rb"
            )

    @pytest.mark.parametrize(
        "key_path",
        [
            ["DER_Serialization", "unenc-rsa-pkcs8.der"]
        ]
    )
    @pytest.mark.requires_backend_interface(interface=RSABackend)
    def test_unused_password(self, key_path, backend):
        key_file = os.path.join("asymmetric", *key_path)
        password = b"<PASSWORD>"
        with pytest.raises(TypeError):
            load_vectors_from_file(
                key_file,
                lambda derfile: load_der_private_key(
                    derfile.read(), password, backend
                ),
                mode="rb"
            )

    @pytest.mark.parametrize(
        ("key_path", "password"),
        itertools.product(
            [
                ["DER_Serialization", "enc-rsa-pkcs8.der"],
            ],
            [b"", None]
        )
    )
    @pytest.mark.requires_backend_interface(interface=RSABackend)
    def test_missing_password(self, key_path, password, backend):
        key_file = os.path.join("asymmetric", *key_path)
        with pytest.raises(TypeError):
            load_vectors_from_file(
                key_file,
                lambda derfile: load_der_private_key(
                    derfile.read(), password, backend
                ),
                mode="rb"
            )

    def test_wrong_format(self, backend):
        key_data = b"---- NOT A KEY ----\n"
        with pytest.raises(ValueError):
            load_der_private_key(
                key_data, None, backend
            )
        with pytest.raises(ValueError):
            load_der_private_key(
                key_data, b"this password will not be used", backend
            )

    def test_corrupt_der_pkcs8(self, backend):
        # unenc-rsa-pkcs8 with a bunch of data missing.
        key_data = textwrap.dedent("""\
        <KEY>
        <KEY>
        mu/UpE/BRZmR
        """).encode()
        bad_der = base64.b64decode(b"".join(key_data.splitlines()))
        with pytest.raises(ValueError):
            load_der_private_key(
                bad_der, None, backend
            )
        with pytest.raises(ValueError):
            load_der_private_key(
                bad_der, b"this password will not be used", backend
            )

    def test_corrupt_traditional_format_der(self, backend):
        # privkey with a bunch of data missing.
        key_data = textwrap.dedent("""\
        <KEY>VKwVhc2LwGKHE0DZM=
        """).encode()
        bad_der = base64.b64decode(b"".join(key_data.splitlines()))
        with pytest.raises(ValueError):
            load_pem_private_key(bad_der, None, backend)
        with pytest.raises(ValueError):
            load_pem_private_key(
                bad_der, b"this password will not be used", backend
            )

    @pytest.mark.parametrize(
        "key_file",
        [
            os.path.join(
                "asymmetric", "DER_Serialization", "unenc-rsa-pkcs8.pub.der"),
            os.path.join(
                "asymmetric", "DER_Serialization", "rsa_public_key.der"),
            os.path.join("asymmetric", "public", "PKCS1", "rsa.pub.der"),
        ]
    )
    @pytest.mark.requires_backend_interface(interface=RSABackend)
    def test_load_der_rsa_public_key(self, key_file, backend):
        key = load_vectors_from_file(
            key_file,
            lambda derfile: load_der_public_key(
                derfile.read(), backend
            ),
            mode="rb"
        )
        assert key
        assert isinstance(key, rsa.RSAPublicKey)
        if isinstance(key, rsa.RSAPublicKeyWithNumbers):
            numbers = key.public_numbers()
            assert numbers.e == 65537

    def test_load_der_invalid_public_key(self, backend):
        with pytest.raises(ValueError):
            load_der_public_key(b"invalid data", backend)

    @pytest.mark.parametrize(
        "key_file",
        [
            os.path.join(
                "asymmetric", "DER_Serialization", "unenc-dsa-pkcs8.pub.der"),
            os.path.join(
                "asymmetric", "DER_Serialization", "dsa_public_key.der"),
        ]
    )
    @pytest.mark.requires_backend_interface(interface=DSABackend)
    def test_load_der_dsa_public_key(self, key_file, backend):
        key = load_vectors_from_file(
            key_file,
            lambda derfile: load_der_public_key(
                derfile.read(), backend
            ),
            mode="rb"
        )
        assert key
        assert isinstance(key, dsa.DSAPublicKey)

    @pytest.mark.requires_backend_interface(interface=EllipticCurveBackend)
    def test_load_ec_public_key(self, backend):
        _skip_curve_unsupported(backend, ec.SECP256R1())
        key = load_vectors_from_file(
            os.path.join(
                "asymmetric", "DER_Serialization", "ec_public_key.der"),
            lambda derfile: load_der_public_key(
                derfile.read(), backend
            ),
            mode="rb"
        )
        assert key
        assert isinstance(key, ec.EllipticCurvePublicKey)
        assert key.curve.name == "secp256r1"
        assert key.curve.key_size == 256


@pytest.mark.requires_backend_interface(interface=PEMSerializationBackend)
class TestPEMSerialization(object):
    @pytest.mark.parametrize(
        ("key_file", "password"),
        [
            (["PEM_Serialization", "rsa_private_key.pem"], b"123456"),
            (["PKCS8", "unenc-rsa-pkcs8.pem"], None),
            (["PKCS8", "enc-rsa-pkcs8.pem"], b"foobar"),
            (["PKCS8", "enc2-rsa-pkcs8.pem"], b"baz"),
            (["PKCS8", "pkcs12_s2k_pem-X_9607.pem"], b"123456"),
            (["PKCS8", "pkcs12_s2k_pem-X_9671.pem"], b"123456"),
            (["PKCS8", "pkcs12_s2k_pem-X_9925.pem"], b"123456"),
            (["PKCS8", "pkcs12_s2k_pem-X_9926.pem"], b"123456"),
            (["PKCS8", "pkcs12_s2k_pem-X_9927.pem"], b"123456"),
            (["PKCS8", "pkcs12_s2k_pem-X_9928.pem"], b"123456"),
            (["PKCS8", "pkcs12_s2k_pem-X_9929.pem"], b"123456"),
            (["PKCS8", "pkcs12_s2k_pem-X_9930.pem"], b"123456"),
            (["PKCS8", "pkcs12_s2k_pem-X_9931.pem"], b"123456"),
            (["PKCS8", "pkcs12_s2k_pem-X_9932.pem"], b"123456"),
            (["Traditional_OpenSSL_Serialization", "key1.pem"], b"123456"),
            (["Traditional_OpenSSL_Serialization", "key2.pem"], b"a123456"),
            (["Traditional_OpenSSL_Serialization", "testrsa.pem"], None),
            (["Traditional_OpenSSL_Serialization", "testrsa-encrypted.pem"],
             b"password"),
        ]
    )
    def test_load_pem_rsa_private_key(self, key_file, password, backend):
        key = load_vectors_from_file(
            os.path.join("asymmetric", *key_file),
            lambda pemfile: load_pem_private_key(
                pemfile.read().encode(), password, backend
            )
        )
        assert key
        assert isinstance(key, rsa.RSAPrivateKey)
        if isinstance(key, rsa.RSAPrivateKeyWithNumbers):
            _check_rsa_private_numbers(key.private_numbers())

    @pytest.mark.parametrize(
        ("key_path", "password"),
        [
            (["Traditional_OpenSSL_Serialization", "dsa.1024.pem"], None),
            (["Traditional_OpenSSL_Serialization", "dsa.2048.pem"], None),
            (["Traditional_OpenSSL_Serialization", "dsa.3072.pem"], None),
            (["PKCS8", "unenc-dsa-pkcs8.pem"], None),
            (["PEM_Serialization", "dsa_private_key.pem"], b"123456"),
        ]
    )
    def test_load_dsa_private_key(self, key_path, password, backend):
        key = load_vectors_from_file(
            os.path.join("asymmetric", *key_path),
            lambda pemfile: load_pem_private_key(
                pemfile.read().encode(), password, backend
            )
        )
        assert key
        assert isinstance(key, dsa.DSAPrivateKey)
        if isinstance(key, dsa.DSAPrivateKeyWithNumbers):
            _check_dsa_private_numbers(key.private_numbers())

    @pytest.mark.parametrize(
        ("key_path", "password"),
        [
            (["PKCS8", "ec_private_key.pem"], None),
            (["PKCS8", "ec_private_key_encrypted.pem"], b"123456"),
            (["PEM_Serialization", "ec_private_key.pem"], None),
            (["PEM_Serialization", "ec_private_key_encrypted.pem"],
             b"123456"),
        ]
    )
    @pytest.mark.requires_backend_interface(interface=EllipticCurveBackend)
    def test_load_pem_ec_private_key(self, key_path, password, backend):
        _skip_curve_unsupported(backend, ec.SECP256R1())
        key = load_vectors_from_file(
            os.path.join("asymmetric", *key_path),
            lambda pemfile: load_pem_private_key(
                pemfile.read().encode(), password, backend
            )
        )
        assert key
        assert isinstance(key, ec.EllipticCurvePrivateKey)
        assert key.curve.name == "secp256r1"
        assert key.curve.key_size == 256

    @pytest.mark.parametrize(
        ("key_file"),
        [
            os.path.join("asymmetric", "PKCS8", "unenc-rsa-pkcs8.pub.pem"),
            os.path.join(
                "asymmetric", "PEM_Serialization", "rsa_public_key.pem"),
            os.path.join("asymmetric", "public", "PKCS1", "rsa.pub.pem"),
        ]
    )
    def test_load_pem_rsa_public_key(self, key_file, backend):
        key = load_vectors_from_file(
            key_file,
            lambda pemfile: load_pem_public_key(
                pemfile.read().encode(), backend
            )
        )
        assert key
        assert isinstance(key, rsa.RSAPublicKey)
        if isinstance(key, rsa.RSAPublicKeyWithNumbers):
            numbers = key.public_numbers()
            assert numbers.e == 65537

    @pytest.mark.parametrize(
        ("key_file"),
        [
            os.path.join("asymmetric", "PKCS8", "unenc-dsa-pkcs8.pub.pem"),
            os.path.join(
                "asymmetric", "PEM_Serialization", "dsa_public_key.pem"),
        ]
    )
    def test_load_pem_dsa_public_key(self, key_file, backend):
        key = load_vectors_from_file(
            key_file,
            lambda pemfile: load_pem_public_key(
                pemfile.read().encode(), backend
            )
        )
        assert key
        assert isinstance(key, dsa.DSAPublicKey)

    @pytest.mark.requires_backend_interface(interface=EllipticCurveBackend)
    def test_load_ec_public_key(self, backend):
        _skip_curve_unsupported(backend, ec.SECP256R1())
        key = load_vectors_from_file(
            os.path.join(
                "asymmetric", "PEM_Serialization", "ec_public_key.pem"),
            lambda pemfile: load_pem_public_key(
                pemfile.read().encode(), backend
            )
        )
        assert key
        assert isinstance(key, ec.EllipticCurvePublicKey)
        assert key.curve.name == "secp256r1"
        assert key.curve.key_size == 256

    def test_rsa_traditional_encrypted_values(self, backend):
        pkey = load_vectors_from_file(
            os.path.join(
                "asymmetric", "Traditional_OpenSSL_Serialization",
                "key1.pem"),
            lambda pemfile: load_pem_private_key(
                pemfile.read().encode(), b"123456", backend
            )
        )
        assert pkey

        numbers = pkey.private_numbers()
        assert numbers.p == int(
            "fb7d316fc51531b36d93adaefaf52db6ad5beb793d37c4cf9dfc1ddd17cfbafb",
            16
        )
        assert numbers.q == int(
            "df98264e646de9a0fbeab094e31caad5bc7adceaaae3c800ca0275dd4bb307f5",
            16
        )
        assert numbers.d == int(
            "db4848c36f478dd5d38f35ae519643b6b810d404bcb76c00e44015e56ca1cab0"
            "<KEY>",
            16
        )
        assert numbers.dmp1 == int(
            "ce997f967192c2bcc3853186f1559fd355c190c58ddc15cbf5de9b6df954c727",
            16
        )
        assert numbers.dmq1 == int(
            "b018a57ab20ffaa3862435445d863369b852cf70a67c55058213e3fe10e3848d",
            16
        )
        assert numbers.iqmp == int(
            "6a8d830616924f5cf2d1bc1973f97fde6b63e052222ac7be06aa2532d10bac76",
            16
        )
        assert numbers.public_numbers.e == 65537
        assert numbers.public_numbers.n == int(
            "dba786074f2f0350ce1d99f5aed5b520cfe0deb5429ec8f2a88563763f566e77"
            "9814b7c310e5326edae31198eed439b845dd2db99eaa60f5c16a43f4be6bcf37",
            16
        )

    @pytest.mark.parametrize(
        "key_path",
        [
            ["Traditional_OpenSSL_Serialization", "testrsa.pem"],
            ["PKCS8", "unenc-rsa-pkcs8.pem"]
        ]
    )
    def test_unused_password(self, key_path, backend):
        key_file = os.path.join("asymmetric", *key_path)
        password = b"<PASSWORD>"
        with pytest.raises(TypeError):
            load_vectors_from_file(
                key_file,
                lambda pemfile: load_pem_private_key(
                    pemfile.read().encode(), password, backend
                )
            )

    @pytest.mark.parametrize(
        "key_path",
        [
            ["Traditional_OpenSSL_Serialization", "testrsa-encrypted.pem"],
            ["PKCS8", "enc-rsa-pkcs8.pem"]
        ]
    )
    def test_wrong_password(self, key_path, backend):
        key_file = os.path.join("asymmetric", *key_path)
        password = b"<PASSWORD>"
        with pytest.raises(ValueError):
            load_vectors_from_file(
                key_file,
                lambda pemfile: load_pem_private_key(
                    pemfile.read().encode(), password, backend
                )
            )

    @pytest.mark.parametrize(
        ("key_path", "password"),
        itertools.product(
            [
                ["Traditional_OpenSSL_Serialization",
                 "testrsa-encrypted.pem"],
                ["PKCS8", "enc-rsa-pkcs8.pem"],
            ],
            [b"", None]
        )
    )
    def test_missing_password(self, key_path, password, backend):
        key_file = os.path.join("asymmetric", *key_path)
        with pytest.raises(TypeError):
            load_vectors_from_file(
                key_file,
                lambda pemfile: load_pem_private_key(
                    pemfile.read().encode(), password, backend
                )
            )

    def test_wrong_private_format(self, backend):
        key_data = b"---- NOT A KEY ----\n"
        with pytest.raises(ValueError):
            load_pem_private_key(
                key_data, None, backend
            )
        with pytest.raises(ValueError):
            load_pem_private_key(
                key_data, b"this password will not be used", backend
            )

    def test_wrong_public_format(self, backend):
        key_data = b"---- NOT A KEY ----\n"
        with pytest.raises(ValueError):
            load_pem_public_key(key_data, backend)

    def test_corrupt_traditional_format(self, backend):
        # privkey.pem with a bunch of data missing.
        key_data = textwrap.dedent("""\
        -----BEGIN RSA PRIVATE KEY-----
        <KEY>
        rD1qFBAVfoQFiOH9uPJgMaoAuoQEisPHVcZDKcOv4wEg6/TInAIXBnEigtqvRzuy
        mvcpHZwQJdmdHHkGKAs37Dfxi67HbkUCIQCeZGliHXFa071Fp06ZeWlR2ADonTZz
        rJBhdTe0v5pCeQIhAIZfkiGgGBX4cIuuckzEm43g9WMUjxP/0GlK39vIyihxAiEA
        mymehFRT0MvqW5xAKAx7Pgkt8HVKwVhc2LwGKHE0DZM=
        -----END RSA PRIVATE KEY-----
        """).encode()
        with pytest.raises(ValueError):
            load_pem_private_key(
                key_data, None, backend
            )
        with pytest.raises(ValueError):
            load_pem_private_key(
                key_data, b"this password will not be used", backend
            )

    def test_traditional_encrypted_corrupt_format(self, backend):
        # privkey.pem with a single bit flipped
        key_data = textwrap.dedent("""\
        -----BEGIN RSA PRIVATE KEY-----
        Proc-Type: <,ENCRYPTED
        DEK-Info: AES-128-CBC,5E22A2BD85A653FB7A3ED20DE84F54CD

        hAqtb5ZkTMGcs4BBDQ1SKZzdQThWRDzEDxM3qBfjvYa35KxZ54aic013mW/lwj2I
        v5bbpOjrHYHNAiZYZ7RNb+ztbF6F/g5PA5g7mFwEq+LFBY0InIplYBSv9QtE+lot
        Dy4AlZa/+NzJwgdKDb+JVfk5SddyD4ywnyeORnMPy4xXKvjXwmW+iLibZVKsjIgw
        H8hSxcD+FhWyJm9h9uLtmpuqhQo0jTUYpnTezZx2xeVPB53Ev7YCxR9Nsgj5GsVf
        9Z/hqLB7IFgM3pa0z3PQeUIZF/cEf72fISWIOBwwkzVrPUkXWfbuWeJXQXSs3amE
        5A295jD9BQp9CY0nNFSsy+qiXWToq2xT3y5zVNEStmN0SCGNaIlUnJzL9IHW+oMI
        kPmXZMnAYBWeeCF1gf3J3aE5lZInegHNfEI0+J0LazC2aNU5Dg/BNqrmRqKWEIo/
        -----END RSA PRIVATE KEY-----
        """).encode()
        password = b"<PASSWORD>"
        with pytest.raises(ValueError):
            load_pem_private_key(
                key_data, None, backend
            )
        with pytest.raises(ValueError):
            load_pem_private_key(
                key_data, password, backend
            )

    def test_unsupported_key_encryption(self, backend):
        key_data = textwrap.dedent("""\
        -----BEGIN RSA PRIVATE KEY-----
        Proc-Type: 4,ENCRYPTED
        DEK-Info: FAKE-123,5E22A2BD85A653FB7A3ED20DE84F54CD

        hAqtb5ZkTMGcs4BBDQ1SKZzdQThWRDzEDxM3qBfjvYa35KxZ54aic013mW/lwj2I
        v5bbpOjrHYHNAiZYZ7RNb+ztbF6F/g5PA5g7mFwEq+LFBY0InIplYBSv9QtE+lot
        Dy4AlZa/+NzJwgdKDb+JVfk5SddyD4ywnyeORnMPy4xXKvjXwmW+iLibZVKsjIgw
        H8hSxcD+FhWyJm9h9uLtmpuqhQo0jTUYpnTezZx2xeVPB53Ev7YCxR9Nsgj5GsVf
        9Z/hqLB7IF<KEY>ZF/cEf72fISWIOBwwkzVrPUkXWfbuWeJXQXSs3amE
        5A295jD9BQp9CY0nNFSsy+qiXWToq2xT3y5zVNEStmN0SCGNaIlUnJzL9IHW+oMI
        kPmXZMnAYBWeeCF1gf3J3aE5lZInegHNfEI0+J0LazC2aNU5Dg/BNqrmRqKWEIo/
        -----END RSA PRIVATE KEY-----
        """).encode()
        password = b"password"
        with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
            load_pem_private_key(
                key_data, password, backend
            )

    def test_corrupt_pkcs8_format(self, backend):
        # unenc-rsa-pkcs8.pem with a bunch of data missing.
        key_data = textwrap.dedent("""\
        -----BEGIN PRIVATE KEY-----
        <KEY>
        we2p/bd2k0HYyCKUGnf2nMPDiQJBAI75pwittSoE240EobUGIDTSz8CJsXIxuDmL
        z+KOpdpPRR5TQmbEMEspjsFpFymMiuYPgmihQbO2cJl1qScY5OkCQQ<KEY>l
        Xxg/SNpjEIv+qAyUD96XVlOJlOIeLHQ8kYE0C6ZA+M<KEY>8Yn0lU/X0/
        mu/UpE/BRZmR
        -----END PRIVATE KEY-----
        """).encode()
        with pytest.raises(ValueError):
            load_pem_private_key(
                key_data, None, backend
            )
        with pytest.raises(ValueError):
            load_pem_private_key(
                key_data, b"this password will not be used", backend
            )

    def test_pks8_encrypted_corrupt_format(self, backend):
        # enc-rsa-pkcs8.pem with some bits flipped.
        key_data = textwrap.dedent("""\
        -----BEGIN ENCRYPTED PRIVATE KEY-----
        <KEY>
        -----END ENCRYPTED PRIVATE KEY-----
        """).encode()
        password = b"<PASSWORD>"
        with pytest.raises(ValueError):
            load_pem_private_key(
                key_data, None, backend
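
# ---------------------------------------------------------------------------
# The test file is truncated above. For orientation, a minimal sketch (not
# part of the test suite) of the round trip these tests exercise: serialize a
# private key with BestAvailableEncryption and load it back with
# load_pem_private_key. The backend argument follows the older cryptography
# API used throughout this file.
# ---------------------------------------------------------------------------
#     from cryptography.hazmat.backends import default_backend
#     from cryptography.hazmat.primitives import serialization
#     from cryptography.hazmat.primitives.asymmetric import rsa
#
#     private_key = rsa.generate_private_key(
#         public_exponent=65537, key_size=2048, backend=default_backend()
#     )
#     pem = private_key.private_bytes(
#         encoding=serialization.Encoding.PEM,
#         format=serialization.PrivateFormat.PKCS8,
#         encryption_algorithm=serialization.BestAvailableEncryption(
#             b"password"),
#     )
#     loaded = serialization.load_pem_private_key(
#         pem, b"password", default_backend()
#     )
#     assert loaded.key_size == private_key.key_size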